Revert "kernel/sched: Fix free-memory write when ISRs abort _current"
This reverts commit 61c7062.

This PR introduced two regressions in main CI:
71977 & 71978.
Let's revert it for now to get main's CI passing again.

(cherry picked from commit ea26bcf)

Original-Signed-off-by: Alberto Escolar Piedras <alberto.escolar.piedras@nordicsemi.no>
GitOrigin-RevId: ea26bcf
Change-Id: Iabd653233709b83ba798da93334e9bae547bc669
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/zephyr/+/5494021
Tested-by: ChromeOS Prod (Robot) <chromeos-ci-prod@chromeos-bot.iam.gserviceaccount.com>
Reviewed-by: Dawid Niedźwiecki <dawidn@google.com>
Tested-by: Dawid Niedźwiecki <dawidn@google.com>
Commit-Queue: Dawid Niedźwiecki <dawidn@google.com>
aescolar authored and Chromeos LUCI committed Apr 26, 2024
1 parent 71cc82b commit e92efe1
Showing 4 changed files with 14 additions and 42 deletions.
2 changes: 0 additions & 2 deletions kernel/include/ksched.h
@@ -37,8 +37,6 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
 #define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
 #endif /* CONFIG_MULTITHREADING */
 
-extern struct k_thread _thread_dummies[CONFIG_MP_MAX_NUM_CPUS];
-
 void z_sched_init(void);
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
 void z_unpend_thread_no_timeout(struct k_thread *thread);
7 changes: 6 additions & 1 deletion kernel/init.c
@@ -657,7 +657,12 @@ FUNC_NORETURN void z_cstart(void)
	LOG_CORE_INIT();
 
 #if defined(CONFIG_MULTITHREADING)
-	z_dummy_thread_init(&_thread_dummies[0]);
+	/* Note: The z_ready_thread() call in prepare_multithreading() requires
+	 * a dummy thread even if CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y
+	 */
+	struct k_thread dummy_thread;
+
+	z_dummy_thread_init(&dummy_thread);
 #endif /* CONFIG_MULTITHREADING */
	/* do any necessary initialization of static devices */
	z_device_state_init();
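Context for the restored pattern above: before multithreading starts, the scheduler needs a valid thread object to save the outgoing context into, and the revert goes back to keeping that placeholder on z_cstart()'s stack. A minimal standalone sketch of the idea, with hypothetical names (thread, cur and context_switch are illustrations, not Zephyr's API):

/* Sketch only: a hypothetical "first switch" showing why a dummy
 * context must exist before multithreading starts. The switch code
 * unconditionally saves outgoing state through the current pointer,
 * so it must point at valid storage; a stack local is enough here
 * because z_cstart()'s stack is abandoned after the switch to main.
 */
#include <stdio.h>

struct thread {
	int id;
	unsigned long saved_sp; /* stand-in for a register save area */
};

static struct thread *cur;

static void context_switch(struct thread *to)
{
	cur->saved_sp = 0xdeadbeef; /* the switch always writes through cur */
	cur = to;
}

int main(void)
{
	struct thread dummy = { .id = -1, .saved_sp = 0 }; /* placeholder */
	struct thread main_thread = { .id = 0, .saved_sp = 0 };

	cur = &dummy;                 /* role of z_dummy_thread_init() */
	context_switch(&main_thread); /* first real switch is now safe */
	printf("running thread %d\n", cur->id);
	return 0;
}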
44 changes: 6 additions & 38 deletions kernel/sched.c
@@ -30,11 +30,6 @@ extern struct k_thread *pending_current;
 
 struct k_spinlock _sched_spinlock;
 
-/* Storage to "complete" the context switch from an invalid/incomplete thread
- * context (ex: exiting an ISR that aborted _current)
- */
-__incoherent struct k_thread _thread_dummies[CONFIG_MP_MAX_NUM_CPUS];
-
 static void update_cache(int preempt_ok);
 static void halt_thread(struct k_thread *thread, uint8_t new_state);
 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
@@ -428,21 +423,19 @@
  * another CPU to catch the IPI we sent and halt. Note that we check
  * for ourselves being asynchronously halted first to prevent simple
  * deadlocks (but not complex ones involving cycles of 3+ threads!).
- * Acts to release the provided lock before returning.
  */
-static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
+static k_spinlock_key_t thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
 {
	if (is_halting(_current)) {
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
	}
	k_spin_unlock(&_sched_spinlock, key);
	while (is_halting(thread)) {
		unsigned int k = arch_irq_lock();
 
		arch_spin_relax(); /* Requires interrupts be masked */
		arch_irq_unlock(k);
	}
+	key = k_spin_lock(&_sched_spinlock);
	z_sched_switch_spin(thread);
+	return key;
 }
 
 /* Shared handler for k_thread_{suspend,abort}(). Called with the
@@ -472,7 +465,8 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
		arch_sched_ipi();
 #endif
	if (arch_is_in_isr()) {
-		thread_halt_spin(thread, key);
+		key = thread_halt_spin(thread, key);
+		k_spin_unlock(&_sched_spinlock, key);
	} else {
		add_to_waitq_locked(_current, wq);
		z_swap(&_sched_spinlock, key);
@@ -486,10 +480,6 @@
			k_spin_unlock(&_sched_spinlock, key);
		}
	}
-	/* NOTE: the scheduler lock has been released. Don't put
-	 * logic here, it's likely to be racy/deadlocky even if you
-	 * re-take the lock!
-	 */
 }
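The restored thread_halt_spin() contract above is a lock-handoff pattern: release _sched_spinlock so the halting CPU can make progress, spin until it finishes, re-take the lock, and return the new key so the caller performs the final unlock. A sketch of that shape using C11 atomics (lock, unlock and wait_for_halt are hypothetical stand-ins, not kernel APIs):

/* Sketch only: drop a shared lock while busy-waiting on another CPU,
 * then reacquire it so the caller still owns the lock on return,
 * matching the shape restored above where thread_halt_spin() returns
 * the key and z_thread_halt() does the unlock.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sched_lock = ATOMIC_FLAG_INIT;
static atomic_bool halting;

static void lock(void)   { while (atomic_flag_test_and_set(&sched_lock)) { /* spin */ } }
static void unlock(void) { atomic_flag_clear(&sched_lock); }

/* Caller holds sched_lock on entry; still holds it on return. */
static void wait_for_halt(void)
{
	unlock(); /* the CPU doing the halt needs the lock to finish */
	while (atomic_load(&halting)) {
		/* spin; the real code relaxes via arch_spin_relax() with IRQs masked */
	}
	lock(); /* reacquire before handing control back */
}

int main(void)
{
	atomic_store(&halting, false);
	lock();
	wait_for_halt(); /* returns immediately; nothing is halting */
	unlock();
	puts("lock handed back to caller");
	return 0;
}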


@@ -1289,8 +1279,6 @@ extern void thread_abort_hook(struct k_thread *thread);
  */
 static void halt_thread(struct k_thread *thread, uint8_t new_state)
 {
-	bool dummify = false;
-
	/* We hold the lock, and the thread is known not to be running
	 * anywhere.
	 */
@@ -1307,16 +1295,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
		}
		(void)z_abort_thread_timeout(thread);
		unpend_all(&thread->join_queue);
-
-		/* Edge case: aborting _current from within an
-		 * ISR that preempted it requires clearing the
-		 * _current pointer so the upcoming context
-		 * switch doesn't clobber the now-freed
-		 * memory
-		 */
-		if (thread == _current && arch_is_in_isr()) {
-			dummify = true;
-		}
	}
 #ifdef CONFIG_SMP
	unpend_all(&thread->halt_queue);
@@ -1355,16 +1333,6 @@
 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
		k_thread_abort_cleanup(thread);
 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
-
-		/* Do this "set _current to dummy" step last so that
-		 * subsystems above can rely on _current being
-		 * unchanged. Disabled for posix as that arch
-		 * continues to use the _current pointer in its swap
-		 * code.
-		 */
-		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
-			z_dummy_thread_init(&_thread_dummies[_current_cpu->id]);
-		}
	}
 }

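For context, the comments deleted above describe the hazard the reverted commit addressed: when an ISR aborts _current, the interrupt-exit context switch still saves registers through _current, which may by then point at freed memory, and the fix redirected _current to a static dummy. A hedged sketch of that failure mode and mitigation (hypothetical names, not kernel code):

/* Sketch only: the use-after-free the reverted commit tried to close.
 * When an ISR aborts the interrupted thread, the interrupt-exit
 * context switch still saves registers through the current pointer;
 * redirecting it to static per-CPU storage keeps that write out of
 * freed memory.
 */
#include <stdlib.h>

struct thread {
	unsigned long saved_regs[16];
};

static struct thread dummy_thread;   /* static: outlives any thread */
static struct thread *cur;

static void isr_abort_current(void)
{
	free(cur);           /* the thread's memory is released... */
	cur = &dummy_thread; /* ...so retarget the upcoming context save */
}

static void isr_exit_switch(struct thread *next)
{
	cur->saved_regs[0] = 42; /* a use-after-free without the redirect */
	cur = next;
}

int main(void)
{
	struct thread next = { { 0 } };

	cur = malloc(sizeof(*cur));
	if (cur == NULL) {
		return 1;
	}
	isr_abort_current();
	isr_exit_switch(&next);
	return 0;
}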
3 changes: 2 additions & 1 deletion kernel/smp.c
@@ -108,6 +108,7 @@ static void wait_for_start_signal(atomic_t *start_flag)
 
 static inline void smp_init_top(void *arg)
 {
+	struct k_thread dummy_thread;
	struct cpu_start_cb csc = arg ? *(struct cpu_start_cb *)arg : (struct cpu_start_cb){0};
 
	/* Let start_cpu() know that this CPU has powered up. */
@@ -122,7 +123,7 @@
		/* Initialize the dummy thread struct so that
		 * the scheduler can schedule actual threads to run.
		 */
-		z_dummy_thread_init(&_thread_dummies[arch_curr_cpu()->id]);
+		z_dummy_thread_init(&dummy_thread);
	}
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
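Same idea on secondary cores: each CPU installs its own placeholder current-thread before it can schedule, and after the revert that placeholder is again a local on the CPU's startup stack. A rough illustration using POSIX threads as stand-in CPUs (cpu_entry and cur are hypothetical, not Zephyr code):

/* Sketch only: every "CPU" needs its own placeholder current-thread
 * before it can schedule real threads; here each one keeps it on its
 * startup stack, as the reverted-to code does.
 */
#include <pthread.h>
#include <stdio.h>

struct thread { int id; };

static _Thread_local struct thread *cur; /* per-"CPU" current pointer */

static void *cpu_entry(void *arg)
{
	struct thread dummy = { .id = -1 }; /* stack-local, like above */

	cur = &dummy; /* role of z_dummy_thread_init() on this CPU */
	printf("cpu %ld scheduling from dummy %d\n", (long)arg, cur->id);
	return NULL; /* a real CPU would enter the scheduler and never return */
}

int main(void)
{
	pthread_t cpus[2];

	for (long i = 0; i < 2; i++) {
		pthread_create(&cpus[i], NULL, cpu_entry, (void *)i);
	}
	for (int i = 0; i < 2; i++) {
		pthread_join(cpus[i], NULL);
	}
	return 0;
}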
