Skip to content

Commit

Permalink
Switch to using BQL_LOCK_GUARD() macro
Browse files Browse the repository at this point in the history
This lock is scope-bound, so we avoid unlock-ordering errors when new
early-return statements are added later.

BQL_LOCK_GUARD is an architecture-independent QEMU idiom to replace the
hexagon-specific {UN,}LOCK_IOTHREAD().  The hexagon-specific macro was
a pseudo-nested lock but we don't seem to need that flexibility if we
move the lock acquisition up to a consistent place high enough in the
call stack.

Signed-off-by: Brian Cain <bcain@quicinc.com>
  • Loading branch information
androm3da authored and quic-mathbern committed Jul 15, 2024
1 parent 7c87f97 commit 608cc69
Show file tree
Hide file tree
Showing 8 changed files with 55 additions and 112 deletions.
1 change: 1 addition & 0 deletions target/hexagon/cpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -571,6 +571,7 @@ static void hexagon_restore_state_to_opc(CPUState *cs,
#if !defined(CONFIG_USER_ONLY)
void hexagon_cpu_soft_reset(CPUHexagonState *env)
{
BQL_LOCK_GUARD();
ARCH_SET_SYSTEM_REG(env, HEX_SREG_SSR, 0);
hexagon_ssr_set_cause(env, HEX_CAUSE_RESET);

Expand Down
10 changes: 0 additions & 10 deletions target/hexagon/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -303,16 +303,6 @@ typedef struct PMUState {
} PMUState;
#endif

/*
 * Pseudo-nested iothread lock helpers: VAR records whether the caller
 * already held the lock; only acquire/release when it did not.
 *
 * Wrapped in do { } while (0) so the macros behave as a single
 * statement and do not swallow a following `else` when used as the
 * unbraced body of an `if` (classic dangling-else macro hazard).
 */
#define LOCK_IOTHREAD(VAR) \
    do { \
        if (!(VAR)) { \
            qemu_mutex_lock_iothread(); \
        } \
    } while (0)
#define UNLOCK_IOTHREAD(VAR) \
    do { \
        if (!(VAR)) { \
            qemu_mutex_unlock_iothread(); \
        } \
    } while (0)


struct Einfo {
uint8_t valid;
uint8_t type;
Expand Down
41 changes: 13 additions & 28 deletions target/hexagon/cpu_helper.c
Original file line number Diff line number Diff line change
Expand Up @@ -337,48 +337,41 @@ void hexagon_touch_memory(CPUHexagonState *env, uint32_t start_addr,
/*
 * Set this thread's bit in MODECTL.E (thread-enabled mask).
 *
 * MODECTL is shared by all hexagon threads, so the read-modify-write
 * below must be serialized: the caller is required to hold the BQL
 * (asserted here rather than taken, per the BQL_LOCK_GUARD conversion).
 */
static void set_enable_mask(CPUHexagonState *env)
{
    g_assert(bql_locked());

    const uint32_t modectl = ARCH_GET_SYSTEM_REG(env, HEX_SREG_MODECTL);
    uint32_t thread_enabled_mask = GET_FIELD(MODECTL_E, modectl);
    thread_enabled_mask |= 0x1 << env->threadId;
    SET_SYSTEM_FIELD(env, HEX_SREG_MODECTL, MODECTL_E, thread_enabled_mask);
}

/*
 * Clear this thread's bit in MODECTL.E (thread-enabled mask).
 *
 * Returns the updated enabled mask so callers can detect when the last
 * thread has been disabled.  Caller must hold the BQL: MODECTL is
 * shared across threads and this is a read-modify-write.
 */
static uint32_t clear_enable_mask(CPUHexagonState *env)
{
    g_assert(bql_locked());

    const uint32_t modectl = ARCH_GET_SYSTEM_REG(env, HEX_SREG_MODECTL);
    uint32_t thread_enabled_mask = GET_FIELD(MODECTL_E, modectl);
    thread_enabled_mask &= ~(0x1 << env->threadId);
    SET_SYSTEM_FIELD(env, HEX_SREG_MODECTL, MODECTL_E, thread_enabled_mask);
    return thread_enabled_mask;
}

/*
 * Set this thread's bit in MODECTL.W (wait-mode mask).
 *
 * Caller must hold the BQL: MODECTL is shared across all hexagon
 * threads and this performs an unserialized-unsafe read-modify-write.
 */
static void set_wait_mode(CPUHexagonState *env)
{
    g_assert(bql_locked());

    const uint32_t modectl = ARCH_GET_SYSTEM_REG(env, HEX_SREG_MODECTL);
    uint32_t thread_wait_mask = GET_FIELD(MODECTL_W, modectl);
    thread_wait_mask |= 0x1 << env->threadId;
    SET_SYSTEM_FIELD(env, HEX_SREG_MODECTL, MODECTL_W, thread_wait_mask);
}

void hexagon_wait_thread(CPUHexagonState *env, target_ulong PC)

{
const bool exception_context = qemu_mutex_iothread_locked();
LOCK_IOTHREAD(exception_context);
g_assert(bql_locked());

if (qemu_loglevel_mask(LOG_GUEST_ERROR) &&
(ATOMIC_LOAD(env->k0_lock_state) != HEX_LOCK_UNLOCKED ||
Expand All @@ -395,13 +388,11 @@ void hexagon_wait_thread(CPUHexagonState *env, target_ulong PC)
*/
if ((cs->exception_index != HEX_EVENT_NONE) ||
(cpu_has_work(cs))) {
UNLOCK_IOTHREAD(exception_context);
return;
}
set_wait_mode(env);
env->wait_next_pc = PC + 4;

UNLOCK_IOTHREAD(exception_context);
cpu_stop_current();
}

Expand All @@ -426,12 +417,11 @@ static void hexagon_resume_thread(CPUHexagonState *env, uint32_t ei)

void hexagon_resume_threads(CPUHexagonState *current_env, uint32_t mask)
{
const bool exception_context = qemu_mutex_iothread_locked();
CPUState *cs;
CPUHexagonState *env;
bool found;

LOCK_IOTHREAD(exception_context);
g_assert(bql_locked());
for (int htid = 0 ; htid < THREADS_MAX ; ++htid) {
if (!(mask & (0x1 << htid))) {
continue;
Expand All @@ -457,11 +447,12 @@ void hexagon_resume_threads(CPUHexagonState *current_env, uint32_t mask)
}
hexagon_resume_thread(env, HEX_EVENT_NONE);
}
UNLOCK_IOTHREAD(exception_context);
}

static void do_start_thread(CPUState *cs, run_on_cpu_data tbd)
{
BQL_LOCK_GUARD();

HexagonCPU *cpu = HEXAGON_CPU(cs);
CPUHexagonState *env = &cpu->env;

Expand Down Expand Up @@ -510,8 +501,9 @@ static target_ulong get_thread0_r2(void)
void hexagon_stop_thread(CPUHexagonState *env)

{
BQL_LOCK_GUARD();
HexagonCPU *cpu = env_archcpu(env);
#if HEX_DEBUG
#if HEX_DEBUG
HEX_DEBUG_LOG("%s: htid %d, cpu %p\n", __func__,
ARCH_GET_SYSTEM_REG(env, HEX_SREG_HTID), cpu);
#endif
Expand Down Expand Up @@ -645,7 +637,7 @@ const char *get_exe_mode_str(CPUHexagonState *env)

int get_exe_mode(CPUHexagonState *env)
{
g_assert(qemu_mutex_iothread_locked());
g_assert(bql_locked());

target_ulong modectl = ARCH_GET_SYSTEM_REG(env, HEX_SREG_MODECTL);
uint32_t thread_enabled_mask = GET_FIELD(MODECTL_E, modectl);
Expand Down Expand Up @@ -675,38 +667,32 @@ int get_exe_mode(CPUHexagonState *env)
/*
 * Clear this thread's bit in MODECTL.W (wait-mode mask), taking the
 * thread out of wait mode.
 *
 * Caller must hold the BQL: MODECTL is shared across all hexagon
 * threads and this is a read-modify-write.
 */
void clear_wait_mode(CPUHexagonState *env)
{
    g_assert(bql_locked());

    const uint32_t modectl = ARCH_GET_SYSTEM_REG(env, HEX_SREG_MODECTL);
    uint32_t thread_wait_mask = GET_FIELD(MODECTL_W, modectl);
    thread_wait_mask &= ~(0x1 << env->threadId);
    SET_SYSTEM_FIELD(env, HEX_SREG_MODECTL, MODECTL_W, thread_wait_mask);
}

/*
 * Record an exception cause in SSR: set SSR.EX and write `cause` into
 * SSR.CAUSE, then let hexagon_modify_ssr() react to the old->new SSR
 * transition (e.g. interrupt re-evaluation).
 *
 * Caller must hold the BQL; this runs on the exception-delivery path
 * where the lock is taken higher in the call stack.
 */
void hexagon_ssr_set_cause(CPUHexagonState *env, uint32_t cause)
{
    g_assert(bql_locked());

    const uint32_t old = ARCH_GET_SYSTEM_REG(env, HEX_SREG_SSR);
    SET_SYSTEM_FIELD(env, HEX_SREG_SSR, SSR_EX, 1);
    SET_SYSTEM_FIELD(env, HEX_SREG_SSR, SSR_CAUSE, cause);
    const uint32_t new = ARCH_GET_SYSTEM_REG(env, HEX_SREG_SSR);

    hexagon_modify_ssr(env, new, old);
}

static MMVector VRegs[VECTOR_UNIT_MAX][NUM_VREGS];
static MMQReg QRegs[VECTOR_UNIT_MAX][NUM_QREGS];

void hexagon_modify_ssr(CPUHexagonState *env, uint32_t new, uint32_t old)
{
const bool exception_context = qemu_mutex_iothread_locked();
LOCK_IOTHREAD(exception_context);
g_assert(bql_locked());

bool old_EX = GET_SSR_FIELD(SSR_EX, old);
bool old_UM = GET_SSR_FIELD(SSR_UM, old);
Expand Down Expand Up @@ -792,7 +778,6 @@ void hexagon_modify_ssr(CPUHexagonState *env, uint32_t new, uint32_t old)
(!new_EX && old_EX)) {
hex_interrupt_update(env);
}
UNLOCK_IOTHREAD(exception_context);
}

void hexagon_set_sys_pcycle_count_high(CPUHexagonState *env,
Expand Down
17 changes: 4 additions & 13 deletions target/hexagon/hex_interrupts.c
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,6 @@ bool hex_check_interrupts(CPUHexagonState *env)
bool int_handled = false;
bool ssr_ex = get_ssr_ex(env);
int max_ints;
const bool exception_context = qemu_mutex_iothread_locked();
bool schedcfgen;

/* Early exit if nothing pending */
Expand All @@ -237,7 +236,7 @@ bool hex_check_interrupts(CPUHexagonState *env)
}

max_ints = reg_field_info[IPENDAD_IPEND].width;
LOCK_IOTHREAD(exception_context);
BQL_LOCK_GUARD();
/* Only check priorities when schedcfgen is set */
schedcfgen = get_schedcfgen(env);
for (int i = 0; i < max_ints; i++) {
Expand Down Expand Up @@ -286,49 +285,43 @@ bool hex_check_interrupts(CPUHexagonState *env)
} else if (int_handled) {
assert(!cs->halted);
}
UNLOCK_IOTHREAD(exception_context);

return int_handled;
}

/*
 * Clear the pending-interrupt bits in `mask` and re-evaluate interrupt
 * delivery across all CPUs.  No-op for an empty mask (early return
 * happens before the lock guard, which is why the scope-bound
 * BQL_LOCK_GUARD() is placed after it).
 *
 * `type` is currently unused here; kept for interface symmetry with
 * hex_raise_interrupts().
 */
void hex_clear_interrupts(CPUHexagonState *env, uint32_t mask, uint32_t type)
{
    if (mask == 0) {
        return;
    }

    /*
     * Notify all CPUs that the interrupt has happened
     */
    BQL_LOCK_GUARD();
    clear_ipend(env, mask);
    hex_interrupt_update(env);
}

/*
 * Raise the pending-interrupt bits in `mask` and re-evaluate interrupt
 * delivery across all CPUs.  No-op for an empty mask.
 *
 * Unlike hex_clear_interrupts(), the caller is required to already
 * hold the BQL (asserted up front, before the early return), because
 * this is reached from paths that take the lock higher in the stack.
 * `type` is currently unused here; kept for interface symmetry.
 */
void hex_raise_interrupts(CPUHexagonState *env, uint32_t mask, uint32_t type)
{
    g_assert(bql_locked());
    if (mask == 0) {
        return;
    }

    /*
     * Notify all CPUs that the interrupt has happened
     */
    set_ipend(env, mask);
    hex_interrupt_update(env);
}

void hex_interrupt_update(CPUHexagonState *env)
{
const bool exception_context = qemu_mutex_iothread_locked();
CPUState *cs;
LOCK_IOTHREAD(exception_context);

g_assert(bql_locked());
if (get_ipend(env) != 0) {
CPU_FOREACH(cs) {
HexagonCPU *hex_cpu = HEXAGON_CPU(cs);
Expand All @@ -340,6 +333,4 @@ void hex_interrupt_update(CPUHexagonState *env)
}
}
}

UNLOCK_IOTHREAD(exception_context);
}
9 changes: 1 addition & 8 deletions target/hexagon/hex_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -534,15 +534,13 @@ static inline void print_thread_states(const char *str)
void hex_tlb_lock(CPUHexagonState *env)
{
qemu_log_mask(CPU_LOG_MMU, "hex_tlb_lock: %d\n", env->threadId);
const bool exception_context = qemu_mutex_iothread_locked();
LOCK_IOTHREAD(exception_context);
BQL_LOCK_GUARD();

uint32_t syscfg = ARCH_GET_SYSTEM_REG(env, HEX_SREG_SYSCFG);
uint8_t tlb_lock = GET_SYSCFG_FIELD(SYSCFG_TLBLOCK, syscfg);
if (tlb_lock) {
if (ATOMIC_LOAD(env->tlb_lock_state) == HEX_LOCK_OWNER) {
qemu_log_mask(CPU_LOG_MMU, "Already the owner\n");
UNLOCK_IOTHREAD(exception_context);
return;
}
qemu_log_mask(CPU_LOG_MMU, "\tWaiting\n");
Expand All @@ -558,22 +556,18 @@ void hex_tlb_lock(CPUHexagonState *env)
qemu_log_mask(CPU_LOG_MMU, "Threads after hex_tlb_lock:\n");
print_thread_states("\tThread");
}
UNLOCK_IOTHREAD(exception_context);
}

void hex_tlb_unlock(CPUHexagonState *env)
{
qemu_log_mask(CPU_LOG_MMU, "hex_tlb_unlock: %d\n", env->threadId);
const bool exception_context = qemu_mutex_iothread_locked();
LOCK_IOTHREAD(exception_context);

/* Nothing to do if the TLB isn't locked by this thread */
uint32_t syscfg = ARCH_GET_SYSTEM_REG(env, HEX_SREG_SYSCFG);
uint8_t tlb_lock = GET_SYSCFG_FIELD(SYSCFG_TLBLOCK, syscfg);
if ((tlb_lock == 0) ||
(ATOMIC_LOAD(env->tlb_lock_state) != HEX_LOCK_OWNER)) {
qemu_log_mask(CPU_LOG_MMU, "\tNot owner\n");
UNLOCK_IOTHREAD(exception_context);
return;
}

Expand Down Expand Up @@ -631,6 +625,5 @@ void hex_tlb_unlock(CPUHexagonState *env)
qemu_log_mask(CPU_LOG_MMU, "Threads after hex_tlb_unlock:\n");
print_thread_states("\tThread");
}
UNLOCK_IOTHREAD(exception_context);
}

8 changes: 3 additions & 5 deletions target/hexagon/hexswi.c
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ static int MapError(int ERR)
static int sim_handle_trap_functional(CPUHexagonState *env)

{
g_assert(qemu_mutex_iothread_locked());
g_assert(bql_locked());

target_ulong ssr = ARCH_GET_SYSTEM_REG(env, HEX_SREG_SSR);
target_ulong what_swi = ARCH_GET_THREAD_REG(env, HEX_REG_R00);
Expand Down Expand Up @@ -1008,7 +1008,7 @@ static int sim_handle_trap_functional(CPUHexagonState *env)
static int sim_handle_trap(CPUHexagonState *env)

{
g_assert(qemu_mutex_iothread_locked());
g_assert(bql_locked());

int retval = 0;
target_ulong what_swi = ARCH_GET_THREAD_REG(env, HEX_REG_R00);
Expand Down Expand Up @@ -1064,8 +1064,7 @@ void hexagon_cpu_do_interrupt(CPUState *cs)
{
HexagonCPU *cpu = HEXAGON_CPU(cs);
CPUHexagonState *env = &cpu->env;
const bool exception_context = qemu_mutex_iothread_locked();
LOCK_IOTHREAD(exception_context);
BQL_LOCK_GUARD();

HEX_DEBUG_LOG("%s: tid %d, event 0x%x, cause 0x%x\n",
__func__, env->threadId, cs->exception_index, env->cause_code);
Expand Down Expand Up @@ -1295,7 +1294,6 @@ void hexagon_cpu_do_interrupt(CPUState *cs)
}

cs->exception_index = HEX_EVENT_NONE;
UNLOCK_IOTHREAD(exception_context);
}

void register_trap_exception(CPUHexagonState *env, int traptype, int imm,
Expand Down
Loading

0 comments on commit 608cc69

Please sign in to comment.