Skip to content

Commit

Permalink
Tickless scheduler
Browse files Browse the repository at this point in the history
This commit incorporates several changes:

Timers are no longer set for a regular tick; instead, they are set only when a thread
may be preempted.  Specifically, the timer is set to the next timeout
that will trigger a scheduling operation.  This avoids timers
triggering a switch to the scheduler to do nothing (resume the currently
running thread).

This means that if a thread sleeps for ten ticks while another runs, we
will get one timer interrupt ten ticks in the future, rather than ten
interrupts one tick apart.

As a consequence, ticks are now calculated retroactively from elapsed
time, rather than counted on each context switch.

This, in turn, necessitates some small API changes.  We previously
conflated two things:

 - Sleep for N * (tick duration)
 - Yield and allow lower-priority threads to run for, at most, N * (tick
   duration)

These are now deconflated by adding a second parameter to thread_sleep.
Most sleeps are of the second form and so this is the default.

This reduces the time taken to run the test suite on Sonata by around
30% and in the Ibex SAFE simulator by 13%.
  • Loading branch information
davidchisnall committed Jun 5, 2024
1 parent 71ce352 commit 076892c
Show file tree
Hide file tree
Showing 10 changed files with 233 additions and 55 deletions.
2 changes: 1 addition & 1 deletion sdk/core/allocator/main.cc
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ namespace
// Drop and reacquire the lock while yielding.
// Sleep for a single tick.
g.unlock();
Timeout smallSleep{0};
Timeout smallSleep{1};
thread_sleep(&smallSleep);
if (!reacquire_lock(timeout, g, smallSleep.elapsed))
{
Expand Down
37 changes: 29 additions & 8 deletions sdk/core/scheduler/main.cc
Original file line number Diff line number Diff line change
Expand Up @@ -270,15 +270,20 @@ namespace sched

ExceptionGuard g{[=]() { sched_panic(mcause, mepc, mtval); }};

bool tick = false;
switch (mcause)
{
// Explicit yield call
case MCAUSE_ECALL_MACHINE:
schedNeeded = true;
{
schedNeeded = true;
Thread *currentThread = Thread::current_get();
tick = currentThread && currentThread->is_ready();
break;
}
case MCAUSE_INTR | MCAUSE_MTIME:
Timer::do_interrupt();
schedNeeded = true;
tick = true;
break;
case MCAUSE_INTR | MCAUSE_MEXTERN:
schedNeeded = false;
Expand All @@ -293,6 +298,7 @@ namespace sched
std::tie(schedNeeded, std::ignore, std::ignore) =
futex_wake(Capability{&word}.address());
});
tick = schedNeeded;
break;
case MCAUSE_THREAD_EXIT:
// Make the current thread non-runnable.
Expand All @@ -305,13 +311,23 @@ namespace sched
// We cannot continue exiting this thread, make sure we will
// pick a new one.
schedNeeded = true;
tick = true;
sealedTStack = nullptr;
break;
default:
sched_panic(mcause, mepc, mtval);
}
if (tick || !Thread::any_ready())
{
Timer::expiretimers();
}
auto newContext =
schedNeeded ? Thread::schedule(sealedTStack) : sealedTStack;
#if 0
Debug::log("Thread: {}",
Thread::current_get() ? Thread::current_get()->id_get() : 0);
#endif
Timer::update();

if constexpr (Accounting)
{
Expand Down Expand Up @@ -351,7 +367,7 @@ namespace sched

if (shouldYield)
{
Thread::yield_interrupt_enabled();
yield();
}

return ret;
Expand Down Expand Up @@ -419,14 +435,17 @@ SystickReturn __cheri_compartment("sched") thread_systemtick_get()
}

__cheriot_minimum_stack(0x80) int __cheri_compartment("sched")
thread_sleep(Timeout *timeout)
thread_sleep(Timeout *timeout, uint32_t flags)
{
STACK_CHECK(0x80);
if (!check_timeout_pointer(timeout))
{
return -EINVAL;
}
Thread::current_get()->suspend(timeout, nullptr, true);
////Debug::log("Thread {} sleeping for {} ticks",
/// Thread::current_get()->id_get(), timeout->remaining);
Thread *current = Thread::current_get();
current->suspend(timeout, nullptr, true, !(flags & ThreadSleepNoEarlyWake));
return 0;
}

Expand Down Expand Up @@ -468,8 +487,10 @@ __cheriot_minimum_stack(0xa0) int futex_timed_wait(Timeout *timeout,
// If we try to block ourself, that's a mistake.
if ((owningThread == currentThread) || (owningThread == nullptr))
{
Debug::log("futex_timed_wait: invalid owning thread {}",
owningThread);
Debug::log("futex_timed_wait: thread {} acquiring PI futex with "
"invalid owning thread {}",
currentThread->id_get(),
owningThreadID);
return -EINVAL;
}
Debug::log("Thread {} boosting priority of {} for futex {}",
Expand Down Expand Up @@ -550,7 +571,7 @@ __cheriot_minimum_stack(0x90) int futex_wake(uint32_t *address, uint32_t count)

if (shouldYield)
{
Thread::yield_interrupt_enabled();
yield();
}

return woke;
Expand Down
72 changes: 54 additions & 18 deletions sdk/core/scheduler/thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@ namespace
// thread structures.
class MultiWaiterInternal;

uint64_t expiry_time_for_timeout(uint32_t timeout);

template<size_t NPrios>
class ThreadImpl final : private utils::NoCopyNoMove
{
Expand Down Expand Up @@ -115,23 +117,18 @@ namespace
}

/**
* When yielding inside the scheduler compartment, we almost always want
* to re-enable interrupts before ecall. If we don't, then a thread with
* interrupt enabled can just call a scheduler function with a long
* timeout, essentially gaining the ability to indefinitely block
* interrupts. Worse, if this is the only thread, then it blocks
* interrupts forever for the whole system.
* Returns true if any thread is ready to run.
*/
static void yield_interrupt_enabled()
static bool any_ready()
{
__asm volatile("ecall");
return priorityMap != 0;
}

static uint32_t yield_timed()
{
uint64_t ticksAtStart = ticksSinceBoot;

yield_interrupt_enabled();
yield();

uint64_t elapsed = ticksSinceBoot - ticksAtStart;
if (elapsed > std::numeric_limits<uint32_t>::max())
Expand All @@ -152,11 +149,12 @@ namespace
*/
bool suspend(Timeout *t,
ThreadImpl **newSleepQueue,
bool yieldUnconditionally = false)
bool yieldUnconditionally = false,
bool yieldNotSleep = false)
{
if (t->remaining != 0)
{
suspend(t->remaining, newSleepQueue);
suspend(t->remaining, newSleepQueue, yieldNotSleep);
}
if ((t->remaining != 0) || yieldUnconditionally)
{
Expand Down Expand Up @@ -188,6 +186,7 @@ namespace
OriginalPriority(priority),
expiryTime(-1),
state(ThreadState::Suspended),
isYielding(false),
sleepQueue(nullptr),
tStackPtr(tstack)
{
Expand All @@ -212,7 +211,7 @@ namespace
// We must be suspended.
Debug::Assert(state == ThreadState::Suspended,
"Waking thread that is in state {}, not suspended",
state);
static_cast<ThreadState>(state));
// First, remove self from the timer waiting list.
timer_list_remove(&waitingList);
if (sleepQueue != nullptr)
Expand All @@ -233,11 +232,18 @@ namespace
schedule = true;
}
}
// If this is the same priority as the current thread, we may need
// to update the timer.
if (priority >= highestPriority)
{
schedule = true;
}
if (reason == WakeReason::Timer || reason == WakeReason::Delete)
{
multiWaiter = nullptr;
}
list_insert(&priorityList[priority]);
isYielding = false;

return schedule;
}
Expand Down Expand Up @@ -278,11 +284,14 @@ namespace
* waiting on a resource, add it to the list of that resource. No
* matter what, it has to be added to the timer list.
*/
void suspend(uint32_t waitTicks, ThreadImpl **newSleepQueue)
void suspend(uint32_t waitTicks,
ThreadImpl **newSleepQueue,
bool yieldNotSleep = false)
{
isYielding = yieldNotSleep;
Debug::Assert(state == ThreadState::Ready,
"Suspending thread that is in state {}, not ready",
state);
static_cast<ThreadState>(state));
list_remove(&priorityList[priority]);
state = ThreadState::Suspended;
priority_map_remove();
Expand All @@ -291,8 +300,7 @@ namespace
list_insert(newSleepQueue);
sleepQueue = newSleepQueue;
}
expiryTime =
(waitTicks == UINT32_MAX ? -1 : ticksSinceBoot + waitTicks);
expiryTime = expiry_time_for_timeout(waitTicks);

timer_list_insert(&waitingList);
}
Expand Down Expand Up @@ -407,7 +415,7 @@ namespace
Debug::Assert(state == ThreadState::Suspended,
"Inserting thread into timer list that is in state "
"{}, not suspended",
state);
static_cast<ThreadState>(state));
if (head == nullptr)
{
timerNext = timerPrev = *headPtr = this;
Expand Down Expand Up @@ -511,6 +519,29 @@ namespace
return priority;
}

bool is_ready()
{
return state == ThreadState::Ready;
}

bool is_yielding()
{
return isYielding;
}

/**
* Returns true if there are other runnable threads with the same
* priority as this thread.
*/
bool has_priority_peers()
{
Debug::Assert(state == ThreadState::Ready,
"Checking for peers on thread that is in state {}, "
"not ready",
static_cast<ThreadState>(state));
return next != this;
}

~ThreadImpl()
{
// We have static definition of threads. We only create threads in
Expand Down Expand Up @@ -616,7 +647,12 @@ namespace
uint8_t priority;
/// The original priority level for this thread. This never changes.
const uint8_t OriginalPriority;
ThreadState state;
ThreadState state : 2;
/**
* If the thread is yielding, it may be scheduled before its timeout
* expires, as long as no other threads are runnable.
*/
bool isYielding : 1;
};

using Thread = ThreadImpl<ThreadPrioNum>;
Expand Down
Loading

0 comments on commit 076892c

Please sign in to comment.