path: root/cpu-exec.c
author     Paolo Bonzini <pbonzini@redhat.com>   2015-01-21 12:09:14 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-02-16 17:30:19 +0100
commit     79e2b9aeccedbfde762b05da662132c7fda292be (patch)
tree       bf4d111dcb48864e149d2414dde293bdf92b0ef4 /cpu-exec.c
parent     9d82b5a792236db31a75b9db5c93af69ac07c7c5 (diff)
exec: RCUify AddressSpaceDispatch
Note that even after this patch, most callers of address_space_* functions must
still be under the big QEMU lock, otherwise the memory region returned by
address_space_translate can disappear as soon as address_space_translate
returns.  This will be fixed in the next part of this series.

Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
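For readers unfamiliar with QEMU's RCU primitives, here is a minimal sketch of
the read-side pattern the patch relies on.  Only rcu_read_lock(),
rcu_read_unlock() and atomic_rcu_read() come from QEMU (qemu/rcu.h and
qemu/atomic.h); the Dispatch type, current_dispatch variable and
read_generation() function are hypothetical stand-ins for cpu->as->dispatch,
not QEMU APIs.

#include "qemu/rcu.h"      /* rcu_read_lock(), rcu_read_unlock() */
#include "qemu/atomic.h"   /* atomic_rcu_read() */

typedef struct Dispatch {             /* hypothetical stand-in */
    int generation;
} Dispatch;

static Dispatch *current_dispatch;    /* stand-in for cpu->as->dispatch */

static int read_generation(void)
{
    Dispatch *d;
    int gen;

    rcu_read_lock();
    /* atomic_rcu_read() pairs with the writer's atomic_rcu_set(): the load
     * is ordered so that the pointed-to object is seen fully initialized,
     * and the object stays valid until rcu_read_unlock().
     */
    d = atomic_rcu_read(&current_dispatch);
    gen = d ? d->generation : -1;
    rcu_read_unlock();
    /* After rcu_read_unlock() the object may be freed at any time; this is
     * why callers of address_space_translate() still need the big QEMU lock
     * (or an RCU read section) while they use the returned memory region.
     */
    return gen;
}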
Diffstat (limited to 'cpu-exec.c')
-rw-r--r--  cpu-exec.c  25
1 file changed, 24 insertions, 1 deletion
diff --git a/cpu-exec.c b/cpu-exec.c
index 98f968df60..adb939a994 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -26,6 +26,7 @@
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory-internal.h"
+#include "qemu/rcu.h"
/* -icount align implementation. */
@@ -146,8 +147,27 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
void cpu_reload_memory_map(CPUState *cpu)
{
+ AddressSpaceDispatch *d;
+
+ if (qemu_in_vcpu_thread()) {
+ /* Do not let the guest prolong the critical section as much as it
+ * desires.
+ *
+ * Currently, this is prevented by the I/O thread's periodic kicking
+ * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
+ * but this will go away once TCG's execution moves out of the global
+ * mutex.
+ *
+ * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
+ * only protects cpu->as->dispatch. Since we reload it below, we can
+ * split the critical section.
+ */
+ rcu_read_unlock();
+ rcu_read_lock();
+ }
+
/* The CPU and TLB are protected by the iothread lock. */
- AddressSpaceDispatch *d = cpu->as->dispatch;
+ d = atomic_rcu_read(&cpu->as->dispatch);
cpu->memory_dispatch = d;
tlb_flush(cpu, 1);
}
@@ -362,6 +382,8 @@ int cpu_exec(CPUArchState *env)
* an instruction scheduling constraint on modern architectures. */
smp_mb();
+ rcu_read_lock();
+
if (unlikely(exit_request)) {
cpu->exit_request = 1;
}
@@ -564,6 +586,7 @@ int cpu_exec(CPUArchState *env)
} /* for(;;) */
cc->cpu_exec_exit(cpu);
+ rcu_read_unlock();
/* fail safe : never use current_cpu outside cpu_exec() */
current_cpu = NULL;
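The writer side of this scheme lives outside cpu-exec.c, in the code that
rebuilds the dispatch tables, and is not part of this diff.  As a rough
sketch, continuing the hypothetical Dispatch/current_dispatch names from the
reader-side example above, the publish-and-reclaim pattern those readers
depend on looks like the following; QEMU would normally defer the free with
call_rcu() instead of blocking, synchronize_rcu() is used here only to keep
the sketch short.

#include <glib.h>          /* g_free() */

static void publish_dispatch(Dispatch *new_d)
{
    Dispatch *old_d = current_dispatch;

    /* atomic_rcu_set() orders the initialization of *new_d before the
     * pointer update, so a concurrent atomic_rcu_read() never observes a
     * half-built object.
     */
    atomic_rcu_set(&current_dispatch, new_d);

    /* Wait until every reader that could still hold old_d has left its
     * rcu_read_lock()/rcu_read_unlock() section, then reclaim it.  This is
     * also why cpu_reload_memory_map() briefly drops and reacquires the read
     * lock: a vCPU that never left its critical section would block
     * reclamation indefinitely.
     */
    synchronize_rcu();
    g_free(old_d);
}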