diff --git a/src/riot-rs-runqueue/src/lib.rs b/src/riot-rs-runqueue/src/lib.rs
index 4112f4862..79a58b4a1 100644
--- a/src/riot-rs-runqueue/src/lib.rs
+++ b/src/riot-rs-runqueue/src/lib.rs
@@ -1,7 +1,8 @@
 #![cfg_attr(not(test), no_std)]
+#![feature(min_specialization)]
 
 mod runqueue;
-pub use runqueue::{RunQueue, RunqueueId, ThreadId};
+pub use runqueue::{CoreId, GlobalRunqueue, RunQueue, RunqueueId, ThreadId};
 
 #[cfg(test)]
 mod tests {
@@ -15,24 +16,24 @@ mod tests {
         runqueue.add(1, 0);
         runqueue.add(2, 0);
 
-        assert_eq!(runqueue.get_next(), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(0));
 
-        runqueue.advance(0);
+        runqueue.advance(0, 0);
 
-        assert_eq!(runqueue.get_next(), Some(1));
-        runqueue.advance(0);
+        assert_eq!(runqueue.get_next(0), Some(1));
+        runqueue.advance(0, 0);
 
-        assert_eq!(runqueue.get_next(), Some(2));
-        assert_eq!(runqueue.get_next(), Some(2));
+        assert_eq!(runqueue.get_next(0), Some(2));
+        assert_eq!(runqueue.get_next(0), Some(2));
 
-        runqueue.advance(0);
-        assert_eq!(runqueue.get_next(), Some(0));
+        runqueue.advance(0, 0);
+        assert_eq!(runqueue.get_next(0), Some(0));
 
-        runqueue.advance(0);
-        assert_eq!(runqueue.get_next(), Some(1));
+        runqueue.advance(0, 0);
+        assert_eq!(runqueue.get_next(0), Some(1));
 
-        runqueue.advance(0);
-        assert_eq!(runqueue.get_next(), Some(2));
+        runqueue.advance(0, 0);
+        assert_eq!(runqueue.get_next(0), Some(2));
     }
 
     #[test]
@@ -44,13 +45,13 @@ mod tests {
         }
 
         for i in 0..=31 {
-            assert_eq!(runqueue.get_next(), Some(i));
-            runqueue.advance(0);
+            assert_eq!(runqueue.get_next(0), Some(i));
+            runqueue.advance(0, 0);
         }
 
         for i in 0..=31 {
-            assert_eq!(runqueue.get_next(), Some(i));
-            runqueue.advance(0);
+            assert_eq!(runqueue.get_next(0), Some(i));
+            runqueue.advance(0, 0);
         }
     }
 
@@ -65,17 +66,17 @@ mod tests {
         runqueue.add(2, 1);
         runqueue.add(4, 1);
 
-        assert_eq!(runqueue.get_next(), Some(2));
+        assert_eq!(runqueue.get_next(0), Some(2));
         runqueue.del(2, 1);
-        assert_eq!(runqueue.get_next(), Some(4));
+        assert_eq!(runqueue.get_next(0), Some(4));
         runqueue.del(4, 1);
-        assert_eq!(runqueue.get_next(), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(0));
         runqueue.del(0, 0);
-        assert_eq!(runqueue.get_next(), Some(1));
+        assert_eq!(runqueue.get_next(0), Some(1));
         runqueue.del(1, 0);
-        assert_eq!(runqueue.get_next(), Some(3));
+        assert_eq!(runqueue.get_next(0), Some(3));
         runqueue.del(3, 0);
-        assert_eq!(runqueue.get_next(), None);
+        assert_eq!(runqueue.get_next(0), None);
     }
 
     #[test]
     fn test_push_twice() {
@@ -84,15 +85,173 @@ mod tests {
         runqueue.add(0, 0);
         runqueue.add(1, 0);
 
-        assert_eq!(runqueue.get_next(), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(0));
         runqueue.del(0, 0);
-        assert_eq!(runqueue.get_next(), Some(1));
+        assert_eq!(runqueue.get_next(0), Some(1));
 
         runqueue.add(0, 0);
 
-        assert_eq!(runqueue.get_next(), Some(1));
+        assert_eq!(runqueue.get_next(0), Some(1));
 
-        runqueue.advance(0);
-        assert_eq!(runqueue.get_next(), Some(0));
+        runqueue.advance(0, 0);
+        assert_eq!(runqueue.get_next(0), Some(0));
+    }
+
+    #[test]
+    fn multicore_basic() {
+        let mut runqueue: RunQueue<8, 32, 4> = RunQueue::new();
+
+        // First thread should get allocated to core 0.
+        assert_eq!(runqueue.add(0, 0), Some(0));
+        // Second thread should get allocated to core 1.
+        assert_eq!(runqueue.add(1, 0), Some(1));
+
+        assert_eq!(runqueue.get_next(0), Some(0));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert!(runqueue.get_next(2).is_none());
+
+        // Advancing a runqueue shouldn't change any allocations
+        // if all threads in the queue are already running.
+        assert_eq!(runqueue.advance(0, 0), None);
+        assert_eq!(runqueue.get_next(0), Some(0));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert!(runqueue.get_next(2).is_none());
+
+        // Restores original order.
+        assert_eq!(runqueue.advance(1, 0), None);
+
+        // Add more threads, which should be allocated to free
+        // cores.
+        assert_eq!(runqueue.add(2, 0), Some(2));
+        assert_eq!(runqueue.add(3, 0), Some(3));
+        assert_eq!(runqueue.add(4, 0), None);
+        assert_eq!(runqueue.get_next(0), Some(0));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+        assert_eq!(runqueue.get_next(3), Some(3));
+
+        // Advancing the runqueue now should change the mapping
+        // on core 0, since the previous head was running there.
+        assert_eq!(runqueue.advance(0, 0), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(4));
+        // Other allocations shouldn't change.
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+        assert_eq!(runqueue.get_next(3), Some(3));
+
+        // Adding or deleting waiting threads shouldn't change
+        // any allocations.
+        assert_eq!(runqueue.del(0, 0), None);
+        assert_eq!(runqueue.add(5, 0), None);
+
+        // Deleting a running thread should allocate the waiting
+        // thread to the now free core.
+        assert_eq!(runqueue.del(2, 0), Some(2));
+        assert_eq!(runqueue.get_next(2), Some(5));
+        // Other allocations shouldn't change.
+        assert_eq!(runqueue.get_next(0), Some(4));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(3), Some(3));
+    }
+
+    #[test]
+    fn multicore_multiqueue() {
+        let mut runqueue: RunQueue<8, 32, 4> = RunQueue::new();
+
+        assert_eq!(runqueue.add(0, 2), Some(0));
+        assert_eq!(runqueue.add(1, 2), Some(1));
+        assert_eq!(runqueue.add(2, 1), Some(2));
+        assert_eq!(runqueue.add(3, 0), Some(3));
+        assert_eq!(runqueue.add(4, 0), None);
+
+        assert_eq!(runqueue.get_next(0), Some(0));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+        assert_eq!(runqueue.get_next(3), Some(3));
+
+        // Advancing highest priority queue shouldn't change anything
+        // because there are more cores than threads in this priority's queue.
+        assert_eq!(runqueue.advance(0, 2), None);
+        assert_eq!(runqueue.get_next(0), Some(0));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+        assert_eq!(runqueue.get_next(3), Some(3));
+
+        // Advancing lowest priority queue should change allocations
+        // since there are two threads in this priority's queue,
+        // but only one available core for them.
+
+        // Core 3 was newly allocated.
+        assert_eq!(runqueue.advance(3, 0), Some(3));
+        assert_eq!(runqueue.get_next(3), Some(4));
+        // Other allocations didn't change.
+        assert_eq!(runqueue.get_next(0), Some(0));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+
+        // Restores original order.
+        runqueue.advance(4, 0);
+
+        // Delete one high-priority thread.
+        // The waiting low-priority thread should be allocated
+        // to the newly freed core.
+
+        // Core 0 was newly allocated.
+        assert_eq!(runqueue.del(0, 2), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(4));
+        // Other allocations didn't change.
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+        assert_eq!(runqueue.get_next(3), Some(3));
+
+        // Add one medium-priority thread.
+        // The low-priority thread furthest back in its priority queue
+        // should be preempted.
+
+        // Core 0 was newly allocated.
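+        // (Thread 4 is the last thread in the lowest-priority queue and is
+        // currently running on core 0, so core 0 is the one that gets preempted.)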
+        assert_eq!(runqueue.add(5, 1), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(5));
+        // Other allocations didn't change.
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+        assert_eq!(runqueue.get_next(3), Some(3));
+    }
+
+    #[test]
+    fn multicore_invalid_core() {
+        let mut runqueue: RunQueue<8, 32, 1> = RunQueue::new();
+        assert_eq!(runqueue.add(0, 2), Some(0));
+        assert_eq!(runqueue.add(1, 2), None);
+        assert_eq!(runqueue.get_next(0), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(0));
+        // Querying for n >= `N_CORES` shouldn't cause a panic.
+        assert_eq!(runqueue.get_next(1), None)
+    }
+
+    #[test]
+    fn multicore_advance() {
+        let mut runqueue: RunQueue<8, 32, 4> = RunQueue::new();
+        assert_eq!(runqueue.add(0, 0), Some(0));
+        assert_eq!(runqueue.add(1, 0), Some(1));
+        assert_eq!(runqueue.add(2, 0), Some(2));
+        assert_eq!(runqueue.add(3, 0), Some(3));
+        assert_eq!(runqueue.add(4, 0), None);
+        assert_eq!(runqueue.add(5, 0), None);
+
+        // Advance head.
+        assert_eq!(runqueue.advance(0, 0), Some(0));
+        assert_eq!(runqueue.get_next(0), Some(4));
+        // Other allocations didn't change.
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(2), Some(2));
+        assert_eq!(runqueue.get_next(3), Some(3));
+
+        // Advance from a thread that is not head.
+        assert_eq!(runqueue.advance(2, 0), Some(2));
+        assert_eq!(runqueue.get_next(2), Some(5));
+        // Other allocations didn't change.
+        assert_eq!(runqueue.get_next(0), Some(4));
+        assert_eq!(runqueue.get_next(1), Some(1));
+        assert_eq!(runqueue.get_next(3), Some(3));
     }
 }
diff --git a/src/riot-rs-runqueue/src/runqueue.rs b/src/riot-rs-runqueue/src/runqueue.rs
index 80581931f..15d3125ab 100644
--- a/src/riot-rs-runqueue/src/runqueue.rs
+++ b/src/riot-rs-runqueue/src/runqueue.rs
@@ -10,6 +10,19 @@ const USIZE_BITS: usize = mem::size_of::<usize>() * 8;
 /// Runqueue number.
 pub type RunqueueId = u8;
 pub type ThreadId = u8;
+pub type CoreId = u8;
+
+trait FromBitmap: Sized {
+    fn from_bitmap(bitmap: usize) -> Option<Self>;
+}
+impl FromBitmap for u8 {
+    fn from_bitmap(bitmap: usize) -> Option<Self> {
+        if bitmap == 0 {
+            return None;
+        }
+        Some(ffs(bitmap) as u8 - 1)
+    }
+}
 
 /// Runqueue for `N_QUEUES`, supporting `N_THREADS` total.
 ///
@@ -22,82 +35,279 @@ pub type ThreadId = u8;
 /// special value)
 ///
 /// The current implementation needs an usize for the bit cache,
-/// an `[u8; N_QUEUES]` array for the list tail indexes
-/// and an `[u8; N_THREADS]` for the list next indexes.
-pub struct RunQueue<const N_QUEUES: usize, const N_THREADS: usize> {
+/// an `[RunqueueId; N_QUEUES]` array for the list tail indexes
+/// and an `[ThreadId; N_THREADS]` for the list next indexes.
+pub struct RunQueue<const N_QUEUES: usize, const N_THREADS: usize, const N_CORES: usize = 1> {
     /// Bitcache that represents the currently used queues
     /// in `0..N_QUEUES`.
     bitcache: usize,
     queues: clist::CList<N_QUEUES, N_THREADS>,
+    next: [Option<ThreadId>; N_CORES],
 }
 
-impl<const N_QUEUES: usize, const N_THREADS: usize> RunQueue<{ N_QUEUES }, { N_THREADS }> {
+impl<const N_QUEUES: usize, const N_THREADS: usize, const N_CORES: usize>
+    RunQueue<N_QUEUES, N_THREADS, N_CORES>
+{
     // NOTE: we don't impl Default here because hax does not support it yet. When it does, we
     // should impl it.
     #[allow(clippy::new_without_default)]
-    pub const fn new() -> RunQueue<{ N_QUEUES }, { N_THREADS }> {
+    pub const fn new() -> RunQueue<N_QUEUES, N_THREADS, N_CORES> {
         // unfortunately we cannot assert!() on N_QUEUES and N_THREADS,
         // as panics in const fn's are not (yet) implemented.
         RunQueue {
             bitcache: 0,
             queues: CList::new(),
+            next: [None; N_CORES],
+        }
+    }
+
+    /// Returns the next thread that should run on this core.
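+    ///
+    /// A minimal usage sketch (the core id `0` is just an example value):
+    /// `if let Some(pid) = runqueue.get_next(0) { /* dispatch pid on core 0 */ }`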
+    pub fn get_next(&self, core: CoreId) -> Option<ThreadId> {
+        if core as usize >= N_CORES {
+            return None;
+        }
+        self.next[core as usize]
+    }
+
+    /// Returns the `n` highest priority threads in the [`RunQueue`].
+    ///
+    /// This iterates through all non-empty runqueues with descending
+    /// priority, until `N_CORES` threads have been found or all
+    /// queues have been checked.
+    ///
+    /// Complexity is O(n).
+    fn get_next_n(&self) -> [Option<ThreadId>; N_CORES] {
+        let mut next_list = [None; N_CORES];
+        let mut bitcache = self.bitcache;
+        // Get head from highest priority queue.
+        let mut head = match self.peek_head(bitcache) {
+            Some(head) => {
+                next_list[0] = Some(head);
+                head
+            }
+            None => return next_list,
+        };
+        let mut thread = head;
+        // Iterate through threads in the queue.
+        for i in 1..N_CORES {
+            thread = self.queues.peek_next(thread);
+            if thread == head {
+                // Switch to next runqueue.
+                bitcache &= !(1 << (ffs(bitcache) - 1));
+                head = match self.peek_head(bitcache) {
+                    Some(h) => h,
+                    // Early return instead of break, to make hax happy.
+                    None => return next_list,
+                };
+                thread = head;
+            };
+            next_list[i] = Some(thread);
         }
+        next_list
+    }
+
+    #[inline]
+    fn peek_head(&self, bitcache: usize) -> Option<ThreadId> {
+        // Switch to highest priority runqueue remaining
+        // in the bitcache.
+        let rq = match RunqueueId::from_bitmap(bitcache) {
+            Some(rq) => rq,
+            None => return None,
+        };
+        self.queues.peek_head(rq)
     }
+}
+pub trait GlobalRunqueue<const N_QUEUES: usize, const N_THREADS: usize, const N_CORES: usize> {
     /// Adds thread with pid `n` to runqueue number `rq`.
-    pub fn add(&mut self, n: ThreadId, rq: RunqueueId) {
+    ///
+    /// Returns a [`CoreId`] if the allocation for this core changed.
+    ///
+    fn add(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId>;
+
+    /// Removes thread with pid `n` from runqueue number `rq`.
+    ///
+    /// Returns a [`CoreId`] if the allocation for this core changed.
+    ///
+    /// # Panics
+    ///
+    /// Panics for `N_CORES == 1` if `n` is not the queue's head.
+    /// This is fine, RIOT-rs only ever calls `del()` for the current thread.
+    fn del(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId>;
+
+    /// Advances from thread `n` in runqueue number `rq`.
+    ///
+    /// This is used to "yield" to another thread of *the same* priority.
+    ///
+    /// Returns a [`CoreId`] if the allocation for this core changed.
+    ///
+    /// **Warning: If `n` is not the head of the run queue, this changes
+    /// the order of the queue because the thread is moved to the
+    /// tail.**
+    fn advance(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId>;
+
+    /// Update `self.next` so that the highest `N_CORES` threads
+    /// are allocated.
+    ///
+    /// This only changes allocations if a thread was previously allocated
+    /// and is now not part of the new list anymore, or the other way around.
+    /// It assumes that there was at most one change in the runqueue since the
+    /// last reallocation (only one add/delete or a runqueue advancement)!
+    ///
+    /// Returns a [`CoreId`] if the allocation for this core changed.
+    fn reallocate(&mut self) -> Option<CoreId>;
+}
+
+impl<const N_QUEUES: usize, const N_THREADS: usize, const N_CORES: usize>
+    GlobalRunqueue<N_QUEUES, N_THREADS, N_CORES> for RunQueue<N_QUEUES, N_THREADS, N_CORES>
+{
+    default fn add(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId> {
         debug_assert!((n as usize) < N_THREADS);
         debug_assert!((rq as usize) < N_QUEUES);
         self.bitcache |= 1 << rq;
         self.queues.push(n, rq);
+        self.reallocate()
     }
 
-    /// Removes thread with pid `n` from runqueue number `rq`.
+    default fn del(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId> {
+        debug_assert!((n as usize) < N_THREADS);
+        debug_assert!((rq as usize) < N_QUEUES);
+
+        if self.queues.peek_head(rq) == Some(n) {
+            let popped = self.queues.pop_head(rq);
+            assert_eq!(popped, Some(n));
+        } else {
+            self.queues.del(n, rq);
+        }
+
+        if self.queues.is_empty(rq) {
+            self.bitcache &= !(1 << rq);
+        }
+        self.reallocate()
+    }
+
+    default fn advance(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId> {
+        debug_assert!((rq as usize) < N_QUEUES);
+        if Some(n) == self.queues.peek_head(rq) {
+            self.queues.advance(rq);
+        } else {
+            // If the thread is not the head remove it
+            // from queue and re-insert it at tail.
+            self.queues.del(n, rq);
+            self.queues.push(n, rq);
+        }
+        self.reallocate()
+    }
+
+    default fn reallocate(&mut self) -> Option<CoreId> {
+        let next = self.get_next_n();
+        let mut bitmap_next = 0;
+        let mut bitmap_allocated = 0;
+        for i in 0..N_CORES {
+            if let Some(id) = next[i] {
+                bitmap_next |= 1 << id
+            }
+            if let Some(id) = self.next[i] {
+                bitmap_allocated |= 1 << id
+            }
+        }
+        if bitmap_next == bitmap_allocated {
+            return None;
+        }
+        let diff = bitmap_next ^ bitmap_allocated;
+        let prev_allocated = ThreadId::from_bitmap(bitmap_allocated & diff);
+        let new_allocated = ThreadId::from_bitmap(bitmap_next & diff);
+
+        let changed_core = self.next.iter().position(|i| *i == prev_allocated).unwrap();
+        self.next[changed_core] = new_allocated;
+        return Some(changed_core as CoreId);
+    }
+}
+
+impl<const N_QUEUES: usize, const N_THREADS: usize> GlobalRunqueue<N_QUEUES, N_THREADS, 1>
+    for RunQueue<N_QUEUES, N_THREADS, 1>
+{
+    /// Advances runqueue number `rq`.
     ///
-    /// # Panics
+    /// This is used to "yield" to another thread of *the same* priority.
     ///
-    /// Panics if `n` is not the queue's head.
-    /// This is fine, RIOT-rs only ever calls `del()` for the current thread.
-    pub fn del(&mut self, n: ThreadId, rq: RunqueueId) {
+    /// Returns a [`CoreId`] if the allocation for this core changed.
+    fn advance(&mut self, _: ThreadId, rq: RunqueueId) -> Option<CoreId> {
+        debug_assert!((rq as usize) < N_QUEUES);
+        self.queues.advance(rq);
+        self.reallocate()
+    }
+
+    #[inline]
+    fn del(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId> {
         debug_assert!((n as usize) < N_THREADS);
         debug_assert!((rq as usize) < N_QUEUES);
         let popped = self.queues.pop_head(rq);
-        // assert_eq!(popped, Some(n));
 
         if self.queues.is_empty(rq) {
             self.bitcache &= !(1 << rq);
         }
+        self.reallocate()
     }
 
-    fn ffs(val: usize) -> u32 {
-        USIZE_BITS as u32 - val.leading_zeros()
+    #[inline]
+    fn reallocate(&mut self) -> Option<CoreId> {
+        let next = self.peek_head(self.bitcache);
+        if next == self.next[0] {
+            return None;
+        }
+        self.next[0] = next;
+        return Some(0);
     }
+}
-    /// Returns the pid that should run next.
-    ///
-    /// Returns the next runnable thread of
-    /// the runqueue with the highest index.
-    //
-    // TODO: Return `ThreadId` instead of u8?
-    pub fn get_next(&self) -> Option<u8> {
-        let rq_ffs = Self::ffs(self.bitcache);
-        if rq_ffs > 0 {
-            let rq = (rq_ffs - 1) as RunqueueId;
-            self.queues.peek_head(rq)
+impl<const N_QUEUES: usize, const N_THREADS: usize> GlobalRunqueue<N_QUEUES, N_THREADS, 2>
+    for RunQueue<N_QUEUES, N_THREADS, 2>
+{
+    fn reallocate(&mut self) -> Option<CoreId> {
+        let next = self.get_next_n();
+
+        if self.next[0] == next[0] {
+            if self.next[1] == next[1] {
+                return None;
+            }
+            self.next[1] = next[1];
+            return Some(1);
+        }
+        if self.next[1] == next[0] {
+            if self.next[0] == next[1] {
+                return None;
+            }
+            self.next[0] = next[1];
+            return Some(0);
+        }
+        if self.next[1] == next[1] {
+            self.next[0] = next[0];
+            return Some(0);
         } else {
-            None
+            self.next[1] = next[0];
+            Some(1)
         }
     }
 
-    /// Advances runqueue number `rq`.
-    ///
-    /// This is used to "yield" to another thread of *the same* priority.
-    pub fn advance(&mut self, rq: RunqueueId) {
+    fn advance(&mut self, n: ThreadId, rq: RunqueueId) -> Option<CoreId> {
         debug_assert!((rq as usize) < N_QUEUES);
-        self.queues.advance(rq)
+        if Some(n) == self.queues.peek_head(rq) {
+            self.queues.advance(rq);
+        } else {
+            // If the thread is not the head remove it
+            // from queue and re-insert it at tail.
+            self.queues.pop_next(rq);
+            self.queues.push(n, rq);
+        }
+        self.reallocate()
     }
 }
 
+fn ffs(val: usize) -> u32 {
+    USIZE_BITS as u32 - val.leading_zeros()
+}
+
 mod clist {
     //! This module implements an array of `N_QUEUES` circular linked lists over an
     //! array of size `N_THREADS`.
@@ -107,8 +317,8 @@ mod clist {
 
     #[derive(Debug, Copy, Clone)]
     pub struct CList<const N_QUEUES: usize, const N_THREADS: usize> {
-        tail: [u8; N_QUEUES],
-        next_idxs: [u8; N_THREADS],
+        tail: [RunqueueId; N_QUEUES],
+        next_idxs: [ThreadId; N_THREADS],
     }
 
    impl<const N_QUEUES: usize, const N_THREADS: usize> CList<N_QUEUES, N_THREADS> {
@@ -148,7 +358,43 @@ mod clist {
             }
         }
 
-        pub fn pop_head(&mut self, rq: RunqueueId) -> Option<u8> {
+        /// Delete a thread from the runqueue.
+        pub fn del(&mut self, n: ThreadId, rq: RunqueueId) {
+            if self.next_idxs[n as usize] == Self::sentinel() {
+                // Thread is not in rq, do nothing.
+                return;
+            }
+
+            if self.next_idxs[n as usize] == n {
+                // `n` should always be the tail in this case, but better be
+                // safe and double-check.
+                if self.tail[rq as usize] == n {
+                    // `n` bites itself, so there's only one entry.
+                    // Clear tail.
+                    self.tail[rq as usize] = Self::sentinel();
+                }
+            } else {
+                let next = self.next_idxs[n as usize];
+
+                // Find previous in list and update its next-idx.
+                let prev = self
+                    .next_idxs
+                    .iter()
+                    .position(|next_idx| *next_idx == n)
+                    .expect("List is circular.");
+                self.next_idxs[prev] = next as ThreadId;
+
+                // Update tail if the thread was the tail.
+                if self.tail[rq as usize] == n {
+                    self.tail[rq as usize] = prev as ThreadId;
+                }
+            }
+
+            // Clear thread's value.
+            self.next_idxs[n as usize] = Self::sentinel();
+        }
+
+        pub fn pop_head(&mut self, rq: RunqueueId) -> Option<ThreadId> {
             if self.tail[rq as usize] == Self::sentinel() {
                 // rq is empty, do nothing
                 None
@@ -171,7 +417,8 @@ mod clist {
             }
         }
 
-        pub fn peek_head(&self, rq: RunqueueId) -> Option<u8> {
+        #[inline]
+        pub fn peek_head(&self, rq: RunqueueId) -> Option<ThreadId> {
             if self.tail[rq as usize] == Self::sentinel() {
                 None
             } else {
@@ -184,6 +431,22 @@ mod clist {
                 self.tail[rq as usize] = self.next_idxs[self.tail[rq as usize] as usize];
             }
         }
+
+        pub fn peek_next(&self, curr: ThreadId) -> ThreadId {
+            self.next_idxs[curr as usize]
+        }
+
+        /// Remove next thread after head in runqueue.
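+        ///
+        /// Returns the removed thread, or `None` if the queue holds fewer
+        /// than two threads.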
+        pub fn pop_next(&mut self, rq: RunqueueId) -> Option<ThreadId> {
+            let head = self.peek_head(rq)?;
+            let next = self.peek_next(head);
+            if next == head {
+                return None;
+            }
+            self.next_idxs[head as usize] = self.next_idxs[next as usize];
+            self.next_idxs[next as usize] = Self::sentinel();
+            Some(next)
+        }
     }
 
     #[cfg(test)]
@@ -229,11 +492,11 @@
             assert!(clist.is_empty(0));
             for i in 0..(N - 1) {
                 println!("pushing {}", i);
-                clist.push(i as u8, 0);
+                clist.push(i as ThreadId, 0);
             }
             for i in 0..(N - 1) {
                 println!("{}", i);
-                assert_eq!(clist.pop_head(0), Some(i as u8));
+                assert_eq!(clist.pop_head(0), Some(i as ThreadId));
             }
             assert_eq!(clist.pop_head(0), None);
             assert!(clist.is_empty(0));
diff --git a/src/riot-rs-threads/src/arch/cortex_m.rs b/src/riot-rs-threads/src/arch/cortex_m.rs
index 559977044..b96b1397f 100644
--- a/src/riot-rs-threads/src/arch/cortex_m.rs
+++ b/src/riot-rs-threads/src/arch/cortex_m.rs
@@ -175,7 +175,7 @@ unsafe fn sched() -> usize {
 
     loop {
         {
-            if let Some(pid) = (unsafe { &*THREADS.as_ptr(cs) }).runqueue.get_next() {
+            if let Some(pid) = (unsafe { &*THREADS.as_ptr(cs) }).runqueue.get_next(0) {
                 next_pid = pid;
                 break;
             }
diff --git a/src/riot-rs-threads/src/lib.rs b/src/riot-rs-threads/src/lib.rs
index d403e8dcd..d8dc607a5 100644
--- a/src/riot-rs-threads/src/lib.rs
+++ b/src/riot-rs-threads/src/lib.rs
@@ -6,7 +6,7 @@
 // invariants
 #![allow(clippy::indexing_slicing)]
 
-use riot_rs_runqueue::RunQueue;
+use riot_rs_runqueue::{GlobalRunqueue, RunQueue};
 pub use riot_rs_runqueue::{RunqueueId, ThreadId};
 
 mod arch;
@@ -287,8 +287,10 @@ fn cleanup() -> ! {
 /// "Yields" to another thread with the same priority.
 pub fn yield_same() {
     THREADS.with_mut(|mut threads| {
-        let runqueue = threads.current().unwrap().prio;
-        threads.runqueue.advance(runqueue);
+        let thread = threads.current().unwrap();
+        let runqueue = thread.prio;
+        let pid = thread.pid;
+        threads.runqueue.advance(pid, runqueue);
         schedule();
     })
 }