Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging

Pull request

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmWcJMUACgkQnKSrs4Gr
# c8hh/Qf/Wt177UlhBR49OWmmegs8c8yS1mhyawo7YIJM4pqoXCYLaACpcKECXcGU
# rlgyR4ow68EXnnU8+/s2cp2UqHxrla+E2eNqBoTDmkNt3Cko5sJn5G5PM5EYK+mO
# JjFRzn7awRyxD6mGOuaMVoj6OuHbAA/U4JF7FhW0YuRl8v0/mvAxRSfQ4U6Crq/y
# 19Aa1CXHD1GH2CUJsMCY8zT47Dr4DJcvZx5IpcDFaHaYDCkktFwNzdo5IDnCx2M2
# xnP37Qp/Q93cu12lWkVOu8HCT6yhoszahyOqlBxDmo7QeGkskrxGbMyE+vHM3fFI
# aGSxiw193U7/QWu+Cq2/727C3YIq1g==
# =pKUb
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 08 Jan 2024 16:37:25 GMT
# gpg: using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  Rename "QEMU global mutex" to "BQL" in comments and docs
  Replace "iothread lock" with "BQL" in comments
  qemu/main-loop: rename qemu_cond_wait_iothread() to qemu_cond_wait_bql()
  qemu/main-loop: rename QEMU_IOTHREAD_LOCK_GUARD to BQL_LOCK_GUARD
  system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
  iothread: Remove unused Error** argument in aio_context_set_aio_params

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

commit 9468484fe9
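
The series below applies one mechanical substitution throughout the tree:
qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() become
bql_lock()/bql_unlock(), qemu_mutex_iothread_locked() becomes bql_locked(),
qemu_cond_wait_iothread() becomes qemu_cond_wait_bql(), and
QEMU_IOTHREAD_LOCK_GUARD() becomes BQL_LOCK_GUARD(). As a minimal sketch of
the recurring "take the BQL only if we do not already hold it" pattern after
the rename (the helper functions here are illustrative, not part of the
patch; the declarations are assumed to come from include/qemu/main-loop.h):

#include "qemu/osdep.h"
#include "qemu/main-loop.h"  /* bql_lock(), bql_unlock(), bql_locked(), BQL_LOCK_GUARD() */

/*
 * Illustrative helper, not from this series: modify state that the
 * Big QEMU Lock serializes, taking the BQL only when the caller does
 * not already hold it -- the same pattern used by cpu_reset_interrupt()
 * and vtd_switch_address_space() in the hunks below.
 */
static void example_poke_bql_protected_state(void)
{
    bool need_lock = !bql_locked();

    if (need_lock) {
        bql_lock();
    }

    /* ... touch BQL-protected state here ... */

    if (need_lock) {
        bql_unlock();
    }
}

/*
 * Scoped equivalent: BQL_LOCK_GUARD() replaces QEMU_IOTHREAD_LOCK_GUARD();
 * it takes the BQL if it is not already held and releases it again when
 * the scope ends.
 */
static void example_guarded_path(void)
{
    BQL_LOCK_GUARD();
    /* ... BQL-protected work ... */
}

The hunks that follow apply the same renaming across accelerators, TCG,
device emulation and the documentation; only names change, not locking
behaviour.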
@@ -41,7 +41,7 @@ void accel_blocker_init(void)
 
 void accel_ioctl_begin(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -51,7 +51,7 @@ void accel_ioctl_begin(void)
 
 void accel_ioctl_end(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -62,7 +62,7 @@ void accel_ioctl_end(void)
 
 void accel_cpu_ioctl_begin(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
 
@@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
 
 void accel_cpu_ioctl_end(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
 
@@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
      * We allow to inhibit only when holding the BQL, so we can identify
      * when an inhibitor wants to issue an ioctl easily.
      */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     /* Block further invocations of the ioctls outside the BQL. */
     CPU_FOREACH(cpu) {
@@ -24,7 +24,7 @@ static void *dummy_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
 
     do {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 #ifndef _WIN32
         do {
             int sig;
@@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
 #else
         qemu_sem_wait(&cpu->sem);
 #endif
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -424,7 +424,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -449,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     hvf_vcpu_destroy(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -806,7 +806,7 @@ static void kvm_dirty_ring_flush(void)
      * should always be with BQL held, serialization is guaranteed.
      * However, let's be sure of it.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     /*
      * First make sure to flush the hardware buffers by kicking all
      * vcpus out in a synchronous way.
@@ -1391,9 +1391,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
         trace_kvm_dirty_ring_reaper("wakeup");
         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
 
-        qemu_mutex_lock_iothread();
+        bql_lock();
         kvm_dirty_ring_reap(s, NULL);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 
         r->reaper_iteration++;
     }
@@ -2817,7 +2817,7 @@ int kvm_cpu_exec(CPUState *cpu)
         return EXCP_HLT;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     cpu_exec_start(cpu);
 
     do {
@@ -2857,11 +2857,11 @@ int kvm_cpu_exec(CPUState *cpu)
 
 #ifdef KVM_HAVE_MCE_INJECTION
         if (unlikely(have_sigbus_pending)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                     pending_sigbus_addr);
             have_sigbus_pending = false;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif
 
@@ -2927,7 +2927,7 @@ int kvm_cpu_exec(CPUState *cpu)
              * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
              */
             trace_kvm_dirty_ring_full(cpu->cpu_index);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             /*
              * We throttle vCPU by making it sleep once it exit from kernel
              * due to dirty ring full. In the dirtylimit scenario, reaping
@@ -2939,7 +2939,7 @@ int kvm_cpu_exec(CPUState *cpu)
             } else {
                 kvm_dirty_ring_reap(kvm_state, NULL);
             }
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             dirtylimit_vcpu_execute(cpu);
             ret = 0;
             break;
@@ -2956,9 +2956,9 @@ int kvm_cpu_exec(CPUState *cpu)
                 break;
             case KVM_SYSTEM_EVENT_CRASH:
                 kvm_cpu_synchronize_state(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 ret = 0;
                 break;
             default:
@@ -2973,7 +2973,7 @@ int kvm_cpu_exec(CPUState *cpu)
     } while (ret == 0);
 
     cpu_exec_end(cpu);
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (ret < 0) {
         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
@@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
         tcg_ctx->gen_tb = NULL;
     }
 #endif
-    if (qemu_mutex_iothread_locked()) {
-        qemu_mutex_unlock_iothread();
+    if (bql_locked()) {
+        bql_unlock();
     }
     assert_no_pages_locked();
 }
@@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 #if defined(TARGET_I386)
         if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
             X86CPU *x86_cpu = X86_CPU(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif /* TARGET_I386 */
         if (!cpu_has_work(cpu)) {
@@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #else
         if (replay_exception()) {
             CPUClass *cc = CPU_GET_CLASS(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             cc->tcg_ops->do_interrupt(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             cpu->exception_index = -1;
 
             if (unlikely(cpu->singlestep_enabled)) {
@@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
-        qemu_mutex_lock_iothread();
+        bql_lock();
         interrupt_request = cpu->interrupt_request;
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if !defined(CONFIG_USER_ONLY)
@@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if defined(TARGET_I386)
@@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #endif /* !TARGET_I386 */
@@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
                  */
                 if (unlikely(cpu->singlestep_enabled)) {
                     cpu->exception_index = EXCP_DEBUG;
-                    qemu_mutex_unlock_iothread();
+                    bql_unlock();
                     return true;
                 }
                 cpu->exception_index = -1;
@@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         }
 
         /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     /* Finally, check if we need to exit to the main loop. */
@@ -1975,7 +1975,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
  * @size: number of bytes
  * @mmu_idx: virtual address context
  * @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
  *
  * Load @size bytes from @addr, which is memory-mapped i/o.
  * The bytes are concatenated in big-endian order with @ret_be.
@@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
                           type, ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset);
     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return int128_make128(b, a);
 }
@@ -2521,7 +2521,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
  * @size: number of bytes
  * @mmu_idx: virtual address context
  * @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
  *
  * Store @size bytes at @addr, which is memory-mapped i/o.
  * The bytes to store are extracted in little-endian order from @val_le;
@@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
                           ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
                     mmu_idx, ra, mr, mr_offset);
     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -123,12 +123,12 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
 
     if (cpu->icount_budget == 0) {
         /*
-         * We're called without the iothread lock, so must take it while
+         * We're called without the BQL, so must take it while
          * we're calling timer handlers.
          */
-        qemu_mutex_lock_iothread();
+        bql_lock();
         icount_notify_aio_contexts();
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
 
@@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu.notifier);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
     do {
         if (cpu_can_run(cpu)) {
             int r;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             r = tcg_cpus_exec(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             switch (r) {
             case EXCP_DEBUG:
                 cpu_handle_guest_debug(cpu);
@@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
                  */
                 break;
             case EXCP_ATOMIC:
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu_exec_step_atomic(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
             default:
                 /* Ignore everything else? */
                 break;
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     tcg_cpus_destroy(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_remove_force_rcu_notifier(&force_rcu.notifier);
     rcu_unregister_thread();
     return NULL;
@@ -111,7 +111,7 @@ static void rr_wait_io_event(void)
 
     while (all_cpu_threads_idle() && replay_can_wait()) {
         rr_stop_kick_timer();
-        qemu_cond_wait_iothread(first_cpu->halt_cond);
+        qemu_cond_wait_bql(first_cpu->halt_cond);
     }
 
     rr_start_kick_timer();
@@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -198,7 +198,7 @@ static void *rr_cpu_thread_fn(void *arg)
 
     /* wait for initial kick-off after machine start */
     while (first_cpu->stopped) {
-        qemu_cond_wait_iothread(first_cpu->halt_cond);
+        qemu_cond_wait_bql(first_cpu->halt_cond);
 
         /* process any pending work */
         CPU_FOREACH(cpu) {
@@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
         /* Only used for icount_enabled() */
         int64_t cpu_budget = 0;
 
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         replay_mutex_lock();
-        qemu_mutex_lock_iothread();
+        bql_lock();
 
         if (icount_enabled()) {
             int cpu_count = rr_cpu_count();
@@ -254,7 +254,7 @@ static void *rr_cpu_thread_fn(void *arg)
             if (cpu_can_run(cpu)) {
                 int r;
 
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 if (icount_enabled()) {
                     icount_prepare_for_run(cpu, cpu_budget);
                 }
@@ -262,15 +262,15 @@ static void *rr_cpu_thread_fn(void *arg)
                 if (icount_enabled()) {
                     icount_process_data(cpu);
                 }
-                qemu_mutex_lock_iothread();
+                bql_lock();
 
                 if (r == EXCP_DEBUG) {
                     cpu_handle_guest_debug(cpu);
                     break;
                 } else if (r == EXCP_ATOMIC) {
-                    qemu_mutex_unlock_iothread();
+                    bql_unlock();
                     cpu_exec_step_atomic(cpu);
-                    qemu_mutex_lock_iothread();
+                    bql_lock();
                     break;
                 }
             } else if (cpu->stop) {
@@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
 /* mask must never be zero, except for A20 change call */
 void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     cpu->interrupt_request |= mask;
 
@@ -649,7 +649,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
 
 void cpu_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     cpu->interrupt_request |= mask;
     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }
@@ -299,7 +299,7 @@ COREAUDIO_WRAPPER_FUNC(write, size_t, (HWVoiceOut *hw, void *buf, size_t size),
 #undef COREAUDIO_WRAPPER_FUNC
 
 /*
- * callback to feed audiooutput buffer. called without iothread lock.
+ * callback to feed audiooutput buffer. called without BQL.
  * allowed to lock "buf_mutex", but disallowed to have any other locks.
  */
 static OSStatus audioDeviceIOProc(
@@ -538,7 +538,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core)
     }
 }
 
-/* called without iothread lock. */
+/* called without BQL. */
 static OSStatus handle_voice_change(
     AudioObjectID in_object_id,
     UInt32 in_number_addresses,
@@ -547,7 +547,7 @@ static OSStatus handle_voice_change(
 {
     coreaudioVoiceOut *core = in_client_data;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (core->outputDeviceID) {
         fini_out_device(core);
@@ -557,7 +557,7 @@ static OSStatus handle_voice_change(
         update_device_playback_state(core);
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     return 0;
 }
 
@@ -351,11 +351,11 @@ void process_queued_cpu_work(CPUState *cpu)
              * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
              * neither CPU can proceed.
              */
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             start_exclusive();
             wi->func(cpu, wi->data);
             end_exclusive();
-            qemu_mutex_lock_iothread();
+            bql_lock();
         } else {
             wi->func(cpu, wi->data);
         }
@@ -226,10 +226,9 @@ instruction. This could be a future optimisation.
 Emulated hardware state
 -----------------------
 
-Currently thanks to KVM work any access to IO memory is automatically
-protected by the global iothread mutex, also known as the BQL (Big
-QEMU Lock). Any IO region that doesn't use global mutex is expected to
-do its own locking.
+Currently thanks to KVM work any access to IO memory is automatically protected
+by the BQL (Big QEMU Lock). Any IO region that doesn't use the BQL is expected
+to do its own locking.
 
 However IO memory isn't the only way emulated hardware state can be
 modified. Some architectures have model specific registers that
@@ -5,7 +5,7 @@ the COPYING file in the top-level directory.
 
 
 This document explains the IOThread feature and how to write code that runs
-outside the QEMU global mutex.
+outside the BQL.
 
 The main loop and IOThreads
 ---------------------------
@@ -29,13 +29,13 @@ scalability bottleneck on hosts with many CPUs. Work can be spread across
 several IOThreads instead of just one main loop. When set up correctly this
 can improve I/O latency and reduce jitter seen by the guest.
 
-The main loop is also deeply associated with the QEMU global mutex, which is a
-scalability bottleneck in itself. vCPU threads and the main loop use the QEMU
-global mutex to serialize execution of QEMU code. This mutex is necessary
-because a lot of QEMU's code historically was not thread-safe.
+The main loop is also deeply associated with the BQL, which is a
+scalability bottleneck in itself. vCPU threads and the main loop use the BQL
+to serialize execution of QEMU code. This mutex is necessary because a lot of
+QEMU's code historically was not thread-safe.
 
 The fact that all I/O processing is done in a single main loop and that the
-QEMU global mutex is contended by all vCPU threads and the main loop explain
+BQL is contended by all vCPU threads and the main loop explain
 why it is desirable to place work into IOThreads.
 
 The experimental virtio-blk data-plane implementation has been benchmarked and
@@ -66,7 +66,7 @@ There are several old APIs that use the main loop AioContext:
 
 Since they implicitly work on the main loop they cannot be used in code that
 runs in an IOThread. They might cause a crash or deadlock if called from an
-IOThread since the QEMU global mutex is not held.
+IOThread since the BQL is not held.
 
 Instead, use the AioContext functions directly (see include/block/aio.h):
  * aio_set_fd_handler() - monitor a file descriptor
@@ -594,7 +594,7 @@ blocking the guest and other background operations.
 
 Coroutine safety can be hard to prove, similar to thread safety. Common
 pitfalls are:
 
-- The global mutex isn't held across ``qemu_coroutine_yield()``, so
+- The BQL isn't held across ``qemu_coroutine_yield()``, so
   operations that used to assume that they execute atomically may have
   to be more careful to protect against changes in the global state.
@@ -184,7 +184,7 @@ modes.
 Reading and writing requests are created by CPU thread of QEMU. Later these
 requests proceed to block layer which creates "bottom halves". Bottom
 halves consist of callback and its parameters. They are processed when
-main loop locks the global mutex. These locks are not synchronized with
+main loop locks the BQL. These locks are not synchronized with
 replaying process because main loop also processes the events that do not
 affect the virtual machine state (like user interaction with monitor).
 
@@ -19,7 +19,7 @@ Triggering reset
 
 This section documents the APIs which "users" of a resettable object should use
 to control it. All resettable control functions must be called while holding
-the iothread lock.
+the BQL.
 
 You can apply a reset to an object using ``resettable_assert_reset()``. You need
 to call ``resettable_release_reset()`` to release the object from reset. To
@@ -108,11 +108,11 @@ static int dump_cleanup(DumpState *s)
     s->guest_note = NULL;
     if (s->resume) {
         if (s->detached) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
         }
         vm_start();
         if (s->detached) {
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
     }
     migrate_del_blocker(&dump_migration_blocker);
@@ -84,7 +84,7 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
     }
 }
 
-/* Context: QEMU global mutex held */
+/* Context: BQL held */
 bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                   VirtIOBlockDataPlane **dataplane,
                                   Error **errp)
@@ -148,7 +148,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
     return true;
 }
 
-/* Context: QEMU global mutex held */
+/* Context: BQL held */
 void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
 {
     VirtIOBlock *vblk;
@@ -179,7 +179,7 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     g_free(s);
 }
 
-/* Context: QEMU global mutex held */
+/* Context: BQL held */
 int virtio_blk_data_plane_start(VirtIODevice *vdev)
 {
     VirtIOBlock *vblk = VIRTIO_BLK(vdev);
@@ -310,7 +310,7 @@ static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
     virtio_queue_host_notifier_read(host_notifier);
 }
 
-/* Context: QEMU global mutex held */
+/* Context: BQL held */
 void virtio_blk_data_plane_stop(VirtIODevice *vdev)
 {
     VirtIOBlock *vblk = VIRTIO_BLK(vdev);
@@ -1539,7 +1539,7 @@ static void virtio_blk_resize(void *opaque)
     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
 
     /*
-     * virtio_notify_config() needs to acquire the global mutex,
+     * virtio_notify_config() needs to acquire the BQL,
      * so it can't be called from an iothread. Instead, schedule
      * it to be run in the main context BH.
      */
@@ -70,14 +70,14 @@ CPUState *cpu_create(const char *typename)
  * BQL here if we need to. cpu_interrupt assumes it is held.*/
 void cpu_reset_interrupt(CPUState *cpu, int mask)
 {
-    bool need_lock = !qemu_mutex_iothread_locked();
+    bool need_lock = !bql_locked();
 
     if (need_lock) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
     cpu->interrupt_request &= ~mask;
     if (need_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
 
@@ -159,7 +159,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL)
  *
  * Use with care; by the time this function returns, the returned pointer is
  * not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
  * protecting the pointer, such as a reference to the region that includes
  * the incoming ram_addr_t.
  *
@@ -1512,7 +1512,7 @@ void virtio_gpu_reset(VirtIODevice *vdev)
         g->reset_finished = false;
         qemu_bh_schedule(g->reset_bh);
         while (!g->reset_finished) {
-            qemu_cond_wait_iothread(&g->reset_cond);
+            qemu_cond_wait_bql(&g->reset_cond);
         }
     } else {
         virtio_gpu_reset_bh(g);
@@ -1665,7 +1665,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
 {
     bool use_iommu, pt;
     /* Whether we need to take the BQL on our own */
-    bool take_bql = !qemu_mutex_iothread_locked();
+    bool take_bql = !bql_locked();
 
     assert(as);
 
@@ -1683,7 +1683,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
      * it. We'd better make sure we have had it already, or, take it.
      */
     if (take_bql) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
 
     /* Turn off first then on the other */
@@ -1738,7 +1738,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
     }
 
     if (take_bql) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     return use_iommu;
@@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level)
      * effect immediately. That just leaves interdomain loopback as the case
      * which uses the BH.
      */
-    if (!qemu_mutex_iothread_locked()) {
+    if (!bql_locked()) {
         qemu_bh_schedule(s->gsi_bh);
         return;
     }
@@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param)
      * We need the BQL because set_callback_pci_intx() may call into PCI code,
      * and because we may need to manipulate the old and new GSI levels.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     qemu_mutex_lock(&s->port_lock);
 
     switch (type) {
@@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
     XenEvtchnPort *p = &s->port_table[port];
 
     /* Because it *might* be a PIRQ port */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     switch (p->type) {
     case EVTCHNSTAT_closed:
@@ -1104,7 +1104,7 @@ int xen_evtchn_soft_reset(void)
         return -ENOTSUP;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     qemu_mutex_lock(&s->port_lock);
 
@@ -1127,7 +1127,7 @@ int xen_evtchn_reset_op(struct evtchn_reset *reset)
         return -ESRCH;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     return xen_evtchn_soft_reset();
 }
 
@@ -1145,7 +1145,7 @@ int xen_evtchn_close_op(struct evtchn_close *close)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     qemu_mutex_lock(&s->port_lock);
 
     ret = close_port(s, close->port, &flush_kvm_routes);
@@ -1272,7 +1272,7 @@ int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
 
     if (s->pirq[pirq->pirq].port) {
         return -EBUSY;
@@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level)
     XenEvtchnState *s = xen_evtchn_singleton;
     int pirq;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
         return false;
@@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
         return;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(addr, data);
 
@@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
         return 1; /* Not a PIRQ */
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
@@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
         return false;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
@@ -1824,7 +1824,7 @@ int xen_physdev_map_pirq(struct physdev_map_pirq *map)
         return -ENOTSUP;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->port_lock);
 
     if (map->domid != DOMID_SELF && map->domid != xen_domid) {
@@ -1884,7 +1884,7 @@ int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     qemu_mutex_lock(&s->port_lock);
 
     if (!pirq_inuse(s, pirq)) {
@@ -1924,7 +1924,7 @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
         return -ENOTSUP;
    }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->port_lock);
 
     if (!pirq_inuse(s, pirq)) {
@@ -1956,7 +1956,7 @@ int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
         return -ENOTSUP;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->port_lock);
 
     if (!pirq_inuse(s, pirq)) {
@@ -176,7 +176,7 @@ int xen_gnttab_map_page(uint64_t idx, uint64_t gfn)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->gnt_lock);
 
     xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa);
@@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
         return -ENOENT;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (s->shinfo_gpa) {
         /* If removing shinfo page, turn the kernel magic off first */
@@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
 {
     XenXenstoreState *s = opaque;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     /*
      * If there's a response pending, we obviously can't scribble over
@@ -934,7 +934,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
     ARMCPU *cpu = ARM_CPU(cs->cpu);
     CPUARMState *env = &cpu->env;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                              cs->hppi.grp, cs->hppi.prio);
@@ -106,7 +106,7 @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
     QEMUS390FlicIO *cur, *next;
     uint8_t isc;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     if (!(flic->pending & FLIC_PENDING_IO)) {
         return 0;
     }
@@ -223,7 +223,7 @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
 {
     uint32_t tmp;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     g_assert(flic->pending & FLIC_PENDING_SERVICE);
     tmp = flic->service_param;
     flic->service_param = 0;
@@ -238,7 +238,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
     QEMUS390FlicIO *io;
     uint8_t isc;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
         return NULL;
     }
@@ -262,7 +262,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
 
 void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
     flic->pending &= ~FLIC_PENDING_MCHK_CR;
 }
@@ -271,7 +271,7 @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
 {
     QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     /* multiplexing is good enough for sclp - kvm does it internally as well */
     flic->service_param |= parm;
     flic->pending |= FLIC_PENDING_SERVICE;
@@ -287,7 +287,7 @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
     QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
     QEMUS390FlicIO *io;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     io = g_new0(QEMUS390FlicIO, 1);
     io->id = subchannel_id;
     io->nr = subchannel_nr;
@@ -304,7 +304,7 @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
 {
     QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     flic->pending |= FLIC_PENDING_MCHK_CR;
 
     qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
@@ -330,7 +330,7 @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
 
 bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     return !!flic->pending;
 }
 
@@ -340,7 +340,7 @@ static void qemu_s390_flic_reset(DeviceState *dev)
     QEMUS390FlicIO *cur, *next;
     int isc;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     flic->simm = 0;
     flic->nimm = 0;
     flic->pending = 0;
@@ -36,7 +36,7 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
         return;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
 
     if (level) {
         env->CP0_Cause |= 1 << (irq + CP0Ca_IP);
@@ -355,9 +355,9 @@ static void *edu_fact_thread(void *opaque)
         smp_mb__after_rmw();
 
         if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             edu_raise_irq(edu, FACT_IRQ);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
     }
 
@@ -131,7 +131,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
     struct SRCSCRResetInfo *ri = data.host_ptr;
     IMX6SRCState *s = ri->s;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
     DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",
@@ -136,7 +136,7 @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
     struct SRCSCRResetInfo *ri = data.host_ptr;
    IMX7SRCState *s = ri->s;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());