Skip to content

Commit

Permalink
Tickless scheduler
Browse files Browse the repository at this point in the history
This commit incorporates several changes:

Timers are no longer set for a regular tick; instead, the timer is set
to the next timeout that will trigger a scheduling operation.  This
avoids timer interrupts that switch to the scheduler only to do nothing
(resume the currently running thread).

This means that if a thread sleeps for ten ticks while another runs, we
will get one timer interrupt ten ticks in the future, rather than ten
interrupts one tick apart.

As a result, ticks are now calculated retroactively from elapsed
time, rather than counted on each context switch.

This, in turn, necessitates some small API changes.  We previously
conflated two things:

 - Sleep for N * (tick duration)
 - Yield and allow lower-priority threads to run for, at most, N * (tick
   duration)

These are now deconflated by adding a second parameter to thread_sleep.
Most sleeps are of the second form and so this is the default.

This reduces the time taken to run the test suite on Sonata by around
30% and in the Ibex SAFE simulator by 13%.
  • Loading branch information
davidchisnall committed Jun 5, 2024
1 parent 71ce352 commit 26bd4a5
Show file tree
Hide file tree
Showing 10 changed files with 223 additions and 42 deletions.
2 changes: 1 addition & 1 deletion sdk/core/allocator/main.cc
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ namespace
// Drop and reacquire the lock while yielding.
// Sleep for a single tick.
g.unlock();
Timeout smallSleep{0};
Timeout smallSleep{1};
thread_sleep(&smallSleep);
if (!reacquire_lock(timeout, g, smallSleep.elapsed))
{
Expand Down
33 changes: 27 additions & 6 deletions sdk/core/scheduler/main.cc
Original file line number Diff line number Diff line change
Expand Up @@ -270,15 +270,20 @@ namespace sched

ExceptionGuard g{[=]() { sched_panic(mcause, mepc, mtval); }};

bool tick = false;
switch (mcause)
{
// Explicit yield call
case MCAUSE_ECALL_MACHINE:
schedNeeded = true;
{
schedNeeded = true;
Thread *currentThread = Thread::current_get();
tick = currentThread && currentThread->is_ready();
break;
}
case MCAUSE_INTR | MCAUSE_MTIME:
Timer::do_interrupt();
schedNeeded = true;
tick = true;
break;
case MCAUSE_INTR | MCAUSE_MEXTERN:
schedNeeded = false;
Expand All @@ -293,6 +298,7 @@ namespace sched
std::tie(schedNeeded, std::ignore, std::ignore) =
futex_wake(Capability{&word}.address());
});
tick = schedNeeded;
break;
case MCAUSE_THREAD_EXIT:
// Make the current thread non-runnable.
Expand All @@ -305,13 +311,23 @@ namespace sched
// We cannot continue exiting this thread, make sure we will
// pick a new one.
schedNeeded = true;
tick = true;
sealedTStack = nullptr;
break;
default:
sched_panic(mcause, mepc, mtval);
}
if (tick || !Thread::any_ready())
{
Timer::expiretimers();
}
auto newContext =
schedNeeded ? Thread::schedule(sealedTStack) : sealedTStack;
#if 0
Debug::log("Thread: {}",
Thread::current_get() ? Thread::current_get()->id_get() : 0);
#endif
Timer::update();

if constexpr (Accounting)
{
Expand Down Expand Up @@ -419,14 +435,17 @@ SystickReturn __cheri_compartment("sched") thread_systemtick_get()
}

__cheriot_minimum_stack(0x80) int __cheri_compartment("sched")
thread_sleep(Timeout *timeout)
thread_sleep(Timeout *timeout, uint32_t flags)
{
STACK_CHECK(0x80);
if (!check_timeout_pointer(timeout))
{
return -EINVAL;
}
Thread::current_get()->suspend(timeout, nullptr, true);
////Debug::log("Thread {} sleeping for {} ticks",
/// Thread::current_get()->id_get(), timeout->remaining);
Thread *current = Thread::current_get();
current->suspend(timeout, nullptr, true, !(flags & ThreadSleepNoEarlyWake));
return 0;
}

Expand Down Expand Up @@ -468,8 +487,10 @@ __cheriot_minimum_stack(0xa0) int futex_timed_wait(Timeout *timeout,
// If we try to block ourself, that's a mistake.
if ((owningThread == currentThread) || (owningThread == nullptr))
{
Debug::log("futex_timed_wait: invalid owning thread {}",
owningThread);
Debug::log("futex_timed_wait: thread {} acquiring PI futex with "
"invalid owning thread {}",
currentThread->id_get(),
owningThreadID);
return -EINVAL;
}
Debug::log("Thread {} boosting priority of {} for futex {}",
Expand Down
64 changes: 55 additions & 9 deletions sdk/core/scheduler/thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@ namespace
// thread structures.
class MultiWaiterInternal;

uint64_t expiry_time_for_timeout(uint32_t timeout);

template<size_t NPrios>
class ThreadImpl final : private utils::NoCopyNoMove
{
Expand Down Expand Up @@ -114,6 +116,11 @@ namespace
return schedTStack;
}

/// Check whether any thread in the system is ready to run.  The
/// priority map is non-zero exactly when at least one thread is
/// runnable (presumably one bit per priority level — confirm).
static bool any_ready()
{
	bool anyRunnable = (priorityMap != 0);
	return anyRunnable;
}

/**
* When yielding inside the scheduler compartment, we almost always want
* to re-enable interrupts before ecall. If we don't, then a thread with
Expand Down Expand Up @@ -152,11 +159,12 @@ namespace
*/
bool suspend(Timeout *t,
ThreadImpl **newSleepQueue,
bool yieldUnconditionally = false)
bool yieldUnconditionally = false,
bool yieldNotSleep = false)
{
if (t->remaining != 0)
{
suspend(t->remaining, newSleepQueue);
suspend(t->remaining, newSleepQueue, yieldNotSleep);
}
if ((t->remaining != 0) || yieldUnconditionally)
{
Expand Down Expand Up @@ -188,6 +196,7 @@ namespace
OriginalPriority(priority),
expiryTime(-1),
state(ThreadState::Suspended),
isYielding(false),
sleepQueue(nullptr),
tStackPtr(tstack)
{
Expand All @@ -212,7 +221,7 @@ namespace
// We must be suspended.
Debug::Assert(state == ThreadState::Suspended,
"Waking thread that is in state {}, not suspended",
state);
static_cast<ThreadState>(state));
// First, remove self from the timer waiting list.
timer_list_remove(&waitingList);
if (sleepQueue != nullptr)
Expand All @@ -233,11 +242,18 @@ namespace
schedule = true;
}
}
// If this is the same priority as the current thread, we may need
// to update the timer.
if (priority >= highestPriority)
{
schedule = true;
}
if (reason == WakeReason::Timer || reason == WakeReason::Delete)
{
multiWaiter = nullptr;
}
list_insert(&priorityList[priority]);
isYielding = false;

return schedule;
}
Expand Down Expand Up @@ -278,11 +294,14 @@ namespace
* waiting on a resource, add it to the list of that resource. No
* matter what, it has to be added to the timer list.
*/
void suspend(uint32_t waitTicks, ThreadImpl **newSleepQueue)
void suspend(uint32_t waitTicks,
ThreadImpl **newSleepQueue,
bool yieldNotSleep = false)
{
isYielding = yieldNotSleep;
Debug::Assert(state == ThreadState::Ready,
"Suspending thread that is in state {}, not ready",
state);
static_cast<ThreadState>(state));
list_remove(&priorityList[priority]);
state = ThreadState::Suspended;
priority_map_remove();
Expand All @@ -291,8 +310,7 @@ namespace
list_insert(newSleepQueue);
sleepQueue = newSleepQueue;
}
expiryTime =
(waitTicks == UINT32_MAX ? -1 : ticksSinceBoot + waitTicks);
expiryTime = expiry_time_for_timeout(waitTicks);

timer_list_insert(&waitingList);
}
Expand Down Expand Up @@ -407,7 +425,7 @@ namespace
Debug::Assert(state == ThreadState::Suspended,
"Inserting thread into timer list that is in state "
"{}, not suspended",
state);
static_cast<ThreadState>(state));
if (head == nullptr)
{
timerNext = timerPrev = *headPtr = this;
Expand Down Expand Up @@ -511,6 +529,29 @@ namespace
return priority;
}

/// Check whether this thread is currently in the ready state (on a
/// run queue rather than suspended).
bool is_ready()
{
	return ThreadState::Ready == state;
}

bool is_yielding()
{
return isYielding;
}

/**
 * Check whether any other runnable thread shares this thread's
 * priority level.  Must only be called on a thread that is ready.
 */
bool has_priority_peers()
{
	Debug::Assert(state == ThreadState::Ready,
	              "Checking for peers on thread that is in state {}, "
	              "not ready",
	              static_cast<ThreadState>(state));
	// Ready threads sit on a circular per-priority list; if this
	// thread's successor is itself, it is alone at this priority.
	bool alone = (next == this);
	return !alone;
}

~ThreadImpl()
{
// We have static definition of threads. We only create threads in
Expand Down Expand Up @@ -616,7 +657,12 @@ namespace
uint8_t priority;
/// The original priority level for this thread. This never changes.
const uint8_t OriginalPriority;
ThreadState state;
ThreadState state : 2;
/**
* If the thread is yielding, it may be scheduled before its timeout
* expires, as long as no other threads are runnable.
*/
bool isYielding : 1;
};

using Thread = ThreadImpl<ThreadPrioNum>;
Expand Down
85 changes: 78 additions & 7 deletions sdk/core/scheduler/timer.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,34 +28,72 @@ namespace

class Timer final : private TimerCore
{
inline static uint64_t lastTickTime = 0;
inline static uint32_t accumulatedTickError = 0;

public:
static void interrupt_setup()
{
static_assert(TIMERCYCLES_PER_TICK <= UINT32_MAX,
"Cycles per tick can't be represented in 32 bits. "
"Double check your platform config");
init();
setnext(TIMERCYCLES_PER_TICK);
}

static void do_interrupt()
using TimerCore::time;

static void update()
{
++Thread::ticksSinceBoot;
auto *thread = Thread::current_get();
bool waitingListIsEmpty = ((Thread::waitingList == nullptr) ||
(Thread::waitingList->expiryTime == -1));
bool threadHasNoPeers =
(thread == nullptr) || (!thread->has_priority_peers());
if (waitingListIsEmpty && threadHasNoPeers)
{
clear();
}
else
{
uint64_t nextTimer = waitingListIsEmpty
? time() + TIMERCYCLES_PER_TICK
: Thread::waitingList->expiryTime;
setnext(nextTimer);
}
}

expiretimers();
setnext(TIMERCYCLES_PER_TICK);
/**
 * Retroactively account for time that has passed since the last
 * update: advance `Thread::ticksSinceBoot` by the number of whole
 * ticks that have elapsed and return the current time in timer
 * cycles.  This replaces counting one tick per timer interrupt.
 */
static uint64_t update_tick()
{
uint64_t now = time();
// Cycles since the last accounting.  NOTE(review): this truncates
// the 64-bit difference to 32 bits; assumes update_tick runs at
// least once per 2^32 cycles — confirm.
uint32_t elapsed = now - lastTickTime;
// The fractional-tick remainder that the whole-tick division at the
// end of this function will discard.
int32_t error = elapsed % TIMERCYCLES_PER_TICK;
if (elapsed < TIMERCYCLES_PER_TICK)
{
// Less than one full tick elapsed; record the shortfall to the
// next tick boundary instead of the remainder.
error = TIMERCYCLES_PER_TICK - error;
}
accumulatedTickError += error;
// NOTE(review): `accumulatedTickError` is declared uint32_t, so the
// `< 0` comparison below is always false and `errorDirection` is
// always 1.  Confirm whether the accumulator was intended to be a
// signed type.
int32_t errorDirection = accumulatedTickError < 0 ? -1 : 1;
int32_t absoluteError = accumulatedTickError * errorDirection;
// Once a whole tick's worth of error has accumulated, fold it into
// the tick count and remove it from the accumulator.
if (absoluteError >= TIMERCYCLES_PER_TICK)
{
Thread::ticksSinceBoot += errorDirection;
accumulatedTickError += TIMERCYCLES_PER_TICK * -errorDirection;
}
lastTickTime = now;
Thread::ticksSinceBoot += elapsed / TIMERCYCLES_PER_TICK;
return now;
}

private:
static void expiretimers()
{
uint64_t now = update_tick();
if (Thread::waitingList == nullptr)
{
return;
}
for (Thread *iter = Thread::waitingList;;)
{
if (iter->expiryTime <= Thread::ticksSinceBoot)
if (iter->expiryTime <= now)
{
Thread *iterNext = iter->timerNext;

Expand All @@ -72,6 +110,39 @@ namespace
break;
}
}
if (!Thread::any_ready())
{
for (Thread *iter = Thread::waitingList; iter;)
{
if (iter->is_yielding())
{
Debug::log("Woke thread {} {} cycles early",
iter->id_get(),
int64_t(iter->expiryTime) - now);
Thread *iterNext = iter->timerNext;
iter->ready(Thread::WakeReason::Timer);
iter = iterNext;
if (Thread::waitingList == nullptr ||
iter == Thread::waitingList)
{
break;
}
}
else
{
break;
}
}
}
}
};

/**
 * Convert a timeout expressed in scheduler ticks into an absolute
 * expiry time measured in timer cycles.
 *
 * @param timeout Number of ticks to wait; the all-ones value (-1)
 *                encodes an unbounded wait.
 * @return The cycle count at which the timeout expires, or the
 *         maximum representable time for an unbounded wait.
 */
uint64_t expiry_time_for_timeout(uint32_t timeout)
{
	// -1 means wait forever; map it to the 64-bit sentinel unchanged.
	if (timeout == -1)
	{
		return -1;
	}
	// Widen before multiplying: `timeout * TIMERCYCLES_PER_TICK` can
	// overflow 32-bit arithmetic for large (but finite) timeouts
	// before the result is converted to 64 bits.
	return Timer::time() + (static_cast<uint64_t>(timeout) * TIMERCYCLES_PER_TICK);
}
} // namespace
6 changes: 6 additions & 0 deletions sdk/core/switcher/entry.S
Original file line number Diff line number Diff line change
Expand Up @@ -970,7 +970,13 @@ __Z13thread_id_getv:
// Load the trusted stack pointer into a register that we will clobber in
// the next instruction when we load the thread ID.
cspecialr ca0, mtdc
//cgettag a1, ca0
// If this is a null pointer, don't try to dereference it and report that
// we are thread 0. This permits the debug code to work even from things
// that are not real threads.
//beqz a1, .Lend
clh a0, TrustedStack_offset_threadID(ca0)
.Lend:
cret


Expand Down
2 changes: 1 addition & 1 deletion sdk/include/FreeRTOS-Compat/task.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ static inline BaseType_t xTaskCheckForTimeOut(TimeOut_t *pxTimeOut,
static inline void vTaskDelay(const TickType_t xTicksToDelay)
{
struct Timeout timeout = {0, xTicksToDelay};
thread_sleep(&timeout);
thread_sleep(&timeout, 0);
}

/**
Expand Down
Loading

0 comments on commit 26bd4a5

Please sign in to comment.