From 732bcce2080df05b4f12f349f8b42c6cffa89002 Mon Sep 17 00:00:00 2001
From: Brijesh Singh
Date: Fri, 6 Apr 2018 13:51:25 -0500
Subject: [PATCH 1/5] tap: set vhostfd passed from qemu cli to non-blocking

A guest boot hangs while probing the network interface when
iommu_platform=on is used.

The following qemu cli hangs without this patch:

# $QEMU \
    -netdev tap,fd=3,id=hostnet0,vhost=on,vhostfd=4 3<>/dev/tap67 4<>/dev/host-net \
    -device virtio-net-pci,netdev=hostnet0,id=net0,iommu_platform=on,disable-legacy=on \
    ...

Commit c471ad0e9bd46 ("vhost_net: device IOTLB support") took care of
setting vhostfd to non-blocking when QEMU opens /dev/vhost-net itself,
but if the fd is passed in from the qemu cli then we need to ensure
that it is set to non-blocking as well.

Fixes: c471ad0e9bd46 ("vhost_net: device IOTLB support")
Cc: qemu-stable@nongnu.org
Cc: Michael S. Tsirkin
Cc: Jason Wang
Signed-off-by: Brijesh Singh
Signed-off-by: Jason Wang
---
 net/tap.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/net/tap.c b/net/tap.c
index 979e622e607..aefd6edd1ea 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -40,6 +40,7 @@
 #include "qemu-common.h"
 #include "qemu/cutils.h"
 #include "qemu/error-report.h"
+#include "qemu/sockets.h"
 
 #include "net/tap.h"
 
@@ -689,6 +690,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
             error_propagate(errp, err);
             return;
         }
+        qemu_set_nonblock(vhostfd);
     } else {
         vhostfd = open("/dev/vhost-net", O_RDWR);
         if (vhostfd < 0) {

From 42ef16ba4461e9eac1b9397dfb614fd587f422b4 Mon Sep 17 00:00:00 2001
From: "Daniel P. Berrange"
Date: Tue, 7 Jul 2020 20:45:15 +0200
Subject: [PATCH 2/5] net: detect errors from probing vnet hdr flag for TAP
 devices

When QEMU sets up a tap based network device backend, it mostly ignores
errors reported from the various ioctl() calls it makes, assuming the
TAP file descriptor is valid. This assumption can easily be violated
when the user is passing in a pre-opened file descriptor. At best, the
ioctls may fail with -EBADF, but if the user passes in a bogus FD
number that happens to clash with a FD number that QEMU has opened
internally for another reason, a wide variety of errnos may result,
as the TUNGETIFF ioctl number may map to a completely different command
on a different type of file.

By ignoring all these errors, QEMU sets up a zombie network backend
that will never pass any data. Even worse, when QEMU shuts down, or
that network backend is hot-removed, it will close this bogus file
descriptor, which could belong to another QEMU device backend.

There's no obvious guaranteed reliable way to detect that a FD
genuinely is a TAP device, as opposed to a UNIX socket, pipe, or
something else. Checking the errno from probing the vnet hdr flag,
though, does catch the big common cases: calling TUNGETIFF will return
EBADF for an invalid FD, and ENOTTY when the FD is a UNIX socket or
pipe, which catches accidental collisions with FDs used for stdio or
the monitor socket.

Previously the example below, where bogus fd 9 collides with the FD
used for the chardev, saw:

$ ./x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hostnet0,fd=9 \
    -chardev socket,id=charchannel0,path=/tmp/qga,server,nowait \
    -monitor stdio -vnc :0
qemu-system-x86_64: -netdev tap,id=hostnet0,fd=9: TUNGETIFF ioctl() failed: Inappropriate ioctl for device
TUNSETOFFLOAD ioctl() failed: Bad address
QEMU 2.9.1 monitor - type 'help' for more information
(qemu) Warning: netdev hostnet0 has no peer

which gives a running QEMU with a zombie network backend.

With this change applied we get an error message and QEMU immediately
exits before carrying on and making a bigger disaster:

$ ./x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hostnet0,fd=9 \
    -chardev socket,id=charchannel0,path=/tmp/qga,server,nowait \
    -monitor stdio -vnc :0
qemu-system-x86_64: -netdev tap,id=hostnet0,vhost=on,fd=9: Unable to query TUNGETIFF on FD 9: Inappropriate ioctl for device

Reported-by: Dr. David Alan Gilbert
Signed-off-by: Daniel P. Berrange
Tested-by: Dr. David Alan Gilbert
Message-id: 20171027085548.3472-1-berrange@redhat.com
[lv: to simplify, don't check on EINVAL with TUNGETIFF as it exists since v2.6.27]
Signed-off-by: Laurent Vivier
Signed-off-by: Jason Wang
---
 net/tap-aix.c     |  2 +-
 net/tap-bsd.c     |  2 +-
 net/tap-linux.c   |  8 +++++---
 net/tap-solaris.c |  2 +-
 net/tap.c         | 29 ++++++++++++++++++++++-------
 net/tap_int.h     |  2 +-
 6 files changed, 31 insertions(+), 14 deletions(-)

diff --git a/net/tap-aix.c b/net/tap-aix.c
index 0e6da639637..dce2c64238a 100644
--- a/net/tap-aix.c
+++ b/net/tap-aix.c
@@ -37,7 +37,7 @@ void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp)
 {
 }
 
-int tap_probe_vnet_hdr(int fd)
+int tap_probe_vnet_hdr(int fd, Error **errp)
 {
     return 0;
 }
diff --git a/net/tap-bsd.c b/net/tap-bsd.c
index 6c9692263d3..4f1d633b089 100644
--- a/net/tap-bsd.c
+++ b/net/tap-bsd.c
@@ -211,7 +211,7 @@ void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp)
 {
 }
 
-int tap_probe_vnet_hdr(int fd)
+int tap_probe_vnet_hdr(int fd, Error **errp)
 {
     return 0;
 }
diff --git a/net/tap-linux.c b/net/tap-linux.c
index a503fa9c6ee..9910e62efd4 100644
--- a/net/tap-linux.c
+++ b/net/tap-linux.c
@@ -147,13 +147,15 @@ void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp)
     }
 }
 
-int tap_probe_vnet_hdr(int fd)
+int tap_probe_vnet_hdr(int fd, Error **errp)
 {
     struct ifreq ifr;
 
     if (ioctl(fd, TUNGETIFF, &ifr) != 0) {
-        error_report("TUNGETIFF ioctl() failed: %s", strerror(errno));
-        return 0;
+        /* TUNGETIFF is available since kernel v2.6.27 */
+        error_setg_errno(errp, errno,
+                         "Unable to query TUNGETIFF on FD %d", fd);
+        return -1;
     }
 
     return ifr.ifr_flags & IFF_VNET_HDR;
diff --git a/net/tap-solaris.c b/net/tap-solaris.c
index a2a92356c1a..3437838a92f 100644
--- a/net/tap-solaris.c
+++ b/net/tap-solaris.c
@@ -206,7 +206,7 @@ void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp)
 {
 }
 
-int tap_probe_vnet_hdr(int fd)
+int tap_probe_vnet_hdr(int fd, Error **errp)
 {
     return 0;
 }
diff --git a/net/tap.c b/net/tap.c
index aefd6edd1ea..24e4697e4e7 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -592,8 +592,12 @@ int net_init_bridge(const Netdev *netdev, const char *name,
         return -1;
     }
 
-    fcntl(fd, F_SETFL, O_NONBLOCK);
-    vnet_hdr = tap_probe_vnet_hdr(fd);
+    qemu_set_nonblock(fd);
+    vnet_hdr = tap_probe_vnet_hdr(fd, errp);
+    if (vnet_hdr < 0) {
+        close(fd);
+        return -1;
+    }
 
     s = net_tap_fd_init(peer, "bridge", name, fd, vnet_hdr);
     snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s,br=%s", helper,
@@ -781,7 +785,11 @@ int net_init_tap(const Netdev *netdev, const char *name,
 
         fcntl(fd, F_SETFL, O_NONBLOCK);
 
-        vnet_hdr = tap_probe_vnet_hdr(fd);
+        vnet_hdr = tap_probe_vnet_hdr(fd, errp);
+        if (vnet_hdr < 0) {
+            close(fd);
+            return -1;
+        }
 
         net_init_tap_one(tap, peer, "tap", name, NULL,
                          script, downscript,
@@ -827,8 +835,11 @@ int net_init_tap(const Netdev *netdev, const char *name,
             fcntl(fd, F_SETFL, O_NONBLOCK);
 
             if (i == 0) {
-                vnet_hdr = tap_probe_vnet_hdr(fd);
-            } else if (vnet_hdr != tap_probe_vnet_hdr(fd)) {
+                vnet_hdr = tap_probe_vnet_hdr(fd, errp);
+                if (vnet_hdr < 0) {
+                    goto free_fail;
+                }
+            } else if (vnet_hdr != tap_probe_vnet_hdr(fd, NULL)) {
                 error_setg(errp,
                            "vnet_hdr not consistent across given tap fds");
                 goto free_fail;
@@ -871,8 +882,12 @@ int net_init_tap(const Netdev *netdev, const char *name,
             return -1;
         }
 
-        fcntl(fd, F_SETFL, O_NONBLOCK);
-        vnet_hdr = tap_probe_vnet_hdr(fd);
+        qemu_set_nonblock(fd);
+        vnet_hdr = tap_probe_vnet_hdr(fd, errp);
+        if (vnet_hdr < 0) {
+            close(fd);
+            return -1;
+        }
 
         net_init_tap_one(tap, peer, "bridge", name, ifname,
                          script, downscript, vhostfdname,
diff --git a/net/tap_int.h b/net/tap_int.h
index ae6888f74ae..0d137686159 100644
--- a/net/tap_int.h
+++ b/net/tap_int.h
@@ -35,7 +35,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr,
 ssize_t tap_read_packet(int tapfd, uint8_t *buf, int maxlen);
 
 void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp);
-int tap_probe_vnet_hdr(int fd);
+int tap_probe_vnet_hdr(int fd, Error **errp);
 int tap_probe_vnet_hdr_len(int fd, int len);
 int tap_probe_has_ufo(int fd);
 void tap_fd_set_offload(int fd, int csum, int tso4, int tso6, int ecn, int ufo);

From 9b3da77087c74d297f889968e7ae8b81198a3b82 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 7 Apr 2020 10:07:46 -0400
Subject: [PATCH 3/5] async: use explicit memory barriers

When using C11 atomics, non-seqcst reads and writes do not participate
in the total order of seqcst operations. In util/async.c and
util/aio-posix.c, in particular, the pattern that we use

          write ctx->notify_me                 write bh->scheduled
          read bh->scheduled                   read ctx->notify_me
          if !bh->scheduled, sleep             if ctx->notify_me, notify

needs to use seqcst operations for both the write and the read. In
general this is something that we do not want, because there can be
many sources that are polled in addition to bottom halves. The
alternative is to place a seqcst memory barrier between the write and
the read. This also comes with a disadvantage, in that the memory
barrier is implicit on strongly-ordered architectures and it wastes a
few dozen clock cycles.

Fortunately, ctx->notify_me is never written concurrently by two
threads, so we can assert that and relax the writes to ctx->notify_me.
The resulting solution works and performs well on both aarch64 and x86.

Note that the atomic_set/atomic_read combination is not an atomic
read-modify-write, and therefore it is even weaker than C11
ATOMIC_RELAXED; on x86, ATOMIC_RELAXED compiles to a locked operation.

Analyzed-by: Ying Fang
Signed-off-by: Paolo Bonzini
Tested-by: Ying Fang
Message-Id: <20200407140746.8041-6-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi
---
 util/aio-posix.c | 19 +++++++++++++++++--
 util/aio-win32.c | 17 +++++++++++++++--
 util/async.c     | 16 ++++++++++++----
 3 files changed, 44 insertions(+), 8 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index 2d51239ec6f..67437b7b6b4 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -575,6 +575,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int64_t timeout;
     int64_t start = 0;
 
+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
+    assert(aio_context_in_iothread(ctx));
+
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
@@ -583,7 +590,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.).  Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }
 
     qemu_lockcnt_inc(&ctx->list_lock);
@@ -624,7 +637,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     if (blocking) {
-        atomic_sub(&ctx->notify_me, 2);
+        /* Finish the poll before clearing the flag.  */
+        atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
+        aio_notify_accept(ctx);
     }
 
     /* Adjust polling time */
diff --git a/util/aio-win32.c b/util/aio-win32.c
index bca496a47a4..0a7390e91ba 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -323,6 +323,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
+    assert(in_aio_context_home_thread(ctx));
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -333,7 +339,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.).  Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }
 
     qemu_lockcnt_inc(&ctx->list_lock);
@@ -366,7 +378,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
-            atomic_sub(&ctx->notify_me, 2);
+            atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
+            aio_notify_accept(ctx);
         }
 
         if (first) {
diff --git a/util/async.c b/util/async.c
index 355af73ee71..9596cd288dd 100644
--- a/util/async.c
+++ b/util/async.c
@@ -221,7 +221,14 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
-    atomic_or(&ctx->notify_me, 1);
+    atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);
+
+    /*
+     * Write ctx->notify_me before computing the timeout
+     * (reading bottom half flags, etc.).  Pairs with
+     * smp_mb in aio_notify().
+     */
+    smp_mb();
 
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
@@ -239,7 +246,8 @@ aio_ctx_check(GSource *source)
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
-    atomic_and(&ctx->notify_me, ~1);
+    /* Finish computing the timeout before clearing the flag.  */
+    atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
     aio_notify_accept(ctx);
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
@@ -335,10 +343,10 @@ LinuxAioState *aio_get_linux_aio(AioContext *ctx)
 void aio_notify(AioContext *ctx)
 {
     /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
-     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     * with smp_mb in aio_ctx_prepare or aio_poll.
      */
     smp_mb();
-    if (ctx->notify_me) {
+    if (atomic_read(&ctx->notify_me)) {
         event_notifier_set(&ctx->notifier);
         atomic_mb_set(&ctx->notified, true);
     }

From ebab9deb03d72f681c7c4e2ef1836627e5191c39 Mon Sep 17 00:00:00 2001
From: Andrew Fasano
Date: Thu, 20 Oct 2022 11:21:13 -0400
Subject: [PATCH 4/5] bugfix: restore iothread mutex locks and unlocks

These iothread mutex locks/unlocks were dropped somewhere between QEMU
2.9.1 and our current codebase. Assuming they don't break record/replay,
they probably should still be here.

Without these, PANDA would deadlock about 75% of the time with the
vexpress-a9 ARM guest machine when using -netdev tap and virtio
networking.
---
 cpu-exec.c | 5 +++++
 cpus.c     | 2 ++
 memory.c   | 2 ++
 qom/cpu.c  | 8 ++++++++
 4 files changed, 17 insertions(+)

diff --git a/cpu-exec.c b/cpu-exec.c
index 11e44d13bf9..8c95120da74 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -471,9 +471,11 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
         if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
             && replay_interrupt()) {
+            qemu_mutex_lock_iothread();
             X86CPU *x86_cpu = X86_CPU(cpu);
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
+            qemu_mutex_unlock_iothread();
         }
 #endif
         if (!cpu_has_work(cpu) && !rr_in_replay()) {
@@ -801,6 +803,9 @@ int cpu_exec(CPUState *cpu)
 #endif /* buggy compiler */
         cpu->can_do_io = 1;
         tb_lock_reset();
+        if (qemu_mutex_iothread_locked()) {
+            qemu_mutex_unlock_iothread();
+        }
     }
 
     /* if an exception is pending, we execute it here */
diff --git a/cpus.c b/cpus.c
index 1756c8c343c..e0b62b70abb 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1182,6 +1182,7 @@ static int tcg_cpu_exec(CPUState *cpu)
 #ifdef CONFIG_PROFILER
     ti = profile_getclock();
 #endif
+    qemu_mutex_unlock_iothread();
     if (use_icount) {
         int64_t count;
         int decr;
@@ -1199,6 +1200,7 @@ static int tcg_cpu_exec(CPUState *cpu)
     cpu_exec_start(cpu);
     ret = cpu_exec(cpu);
     cpu_exec_end(cpu);
+    qemu_mutex_lock_iothread();
 #ifdef CONFIG_PROFILER
     tcg_time += profile_getclock() - ti;
 #endif
diff --git a/memory.c b/memory.c
index d7680d841e9..38e43b457c5 100644
--- a/memory.c
+++ b/memory.c
@@ -915,6 +915,8 @@ void memory_region_transaction_commit(void)
     AddressSpace *as;
 
     assert(memory_region_transaction_depth);
+    assert(qemu_mutex_iothread_locked());
+
     --memory_region_transaction_depth;
     if (!memory_region_transaction_depth) {
         if (memory_region_update_pending) {
diff --git a/qom/cpu.c b/qom/cpu.c
index ba42b364946..bbfaae7a50f 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -115,7 +115,15 @@ static void cpu_common_get_memory_mapping(CPUState *cpu,
 
 void cpu_reset_interrupt(CPUState *cpu, int mask)
 {
+    bool need_lock = !qemu_mutex_iothread_locked();
+
+    if (need_lock) {
+        qemu_mutex_lock_iothread();
+    }
     cpu->interrupt_request &= ~mask;
+    if (need_lock) {
+        qemu_mutex_unlock_iothread();
+    }
 }
 
 void cpu_exit(CPUState *cpu)

From b5aad15035c33ae3bef769b3ac67243d96d3bac7 Mon Sep 17 00:00:00 2001
From: Andrew Fasano
Date: Thu, 20 Oct 2022 11:52:28 -0400
Subject: [PATCH 5/5] Restore more iothread mutex locks/unlocks, including
 asserts in locking and unlocking fns

---
 cpu-exec.c | 12 +++++++++++-
 cpus.c     |  2 ++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index 8c95120da74..10b3ffaad19 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -471,8 +471,8 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
         if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
             && replay_interrupt()) {
-            qemu_mutex_lock_iothread();
             X86CPU *x86_cpu = X86_CPU(cpu);
+            qemu_mutex_lock_iothread();
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
             qemu_mutex_unlock_iothread();
@@ -550,7 +550,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         rr_exception_index_at(RR_CALLSITE_CPU_EXCEPTION_INDEX, &cpu->exception_index);
 #endif
         CPUClass *cc = CPU_GET_CLASS(cpu);
+        qemu_mutex_lock_iothread();
         cc->do_interrupt(cpu);
+        qemu_mutex_unlock_iothread();
         cpu->exception_index = -1;
     } else if (!replay_has_interrupt()) {
         /* give a chance to iothread in replay mode */
@@ -594,6 +596,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     }
 #endif
     if (unlikely(interrupt_request)) {
+        qemu_mutex_lock_iothread();
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
             interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
@@ -601,6 +604,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
+            qemu_mutex_unlock_iothread();
             return true;
         }
         if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
@@ -610,6 +614,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
+            qemu_mutex_unlock_iothread();
             return true;
         }
 #if defined(TARGET_I386)
@@ -620,12 +625,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
+            qemu_mutex_unlock_iothread();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
+            qemu_mutex_unlock_iothread();
             return true;
         }
 #endif
@@ -656,6 +663,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
                the program flow was changed */
             *last_tb = NULL;
         }
+
+        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
+        qemu_mutex_unlock_iothread();
     }
     if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
         atomic_set(&cpu->exit_request, 0);
diff --git a/cpus.c b/cpus.c
index e0b62b70abb..186219f3ee2 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1447,6 +1447,7 @@ bool qemu_mutex_iothread_locked(void)
 
 void qemu_mutex_lock_iothread(void)
 {
+    g_assert(!qemu_mutex_iothread_locked());
     atomic_inc(&iothread_requesting_mutex);
     /* In the simple case there is no need to bump the VCPU thread out of
      * TCG code execution.
@@ -1468,6 +1469,7 @@ void qemu_mutex_lock_iothread(void)
 
 void qemu_mutex_unlock_iothread(void)
 {
+    g_assert(qemu_mutex_iothread_locked());
     iothread_locked = false;
     qemu_mutex_unlock(&qemu_global_mutex);
 }