| field | value | date |
|---|---|---|
| author | Alex Bennée <alex.bennee@linaro.org> | 2017-02-23 18:29:12 +0000 |
| committer | Alex Bennée <alex.bennee@linaro.org> | 2017-02-24 10:32:45 +0000 |
| commit | e5143e30fb87fbf179029387f83f98a5a9b27f19 (patch) | |
| tree | b328a8046041f35bc3d03611f064d5e7098c2b7f | |
| parent | 8d04fb55dec381bc5105cb47f29d918e579e8cbd (diff) | |
| download | qemu-e5143e30fb87fbf179029387f83f98a5a9b27f19.zip | |
tcg: remove global exit_request
There are now only two uses of the global exit_request left.
The first ensures we exit the run loop when we first start up, so that
pending work is processed, and again in the kick handler. This is just
as easily done by setting the first_cpu->exit_request flag.
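
(A rough standalone illustration only, not QEMU source: the ToyCPU struct, field names and first_cpu below are invented stand-ins for CPUState, and C11 atomics stand in for QEMU's atomic_mb_* helpers.)

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct ToyCPU {
    int index;
    atomic_bool exit_request;        /* stands in for cpu->exit_request */
    struct ToyCPU *next;
} ToyCPU;

static ToyCPU cpus[2] = { { .index = 0, .next = &cpus[1] }, { .index = 1 } };
static ToyCPU *const first_cpu = &cpus[0];

int main(void)
{
    /* Instead of raising a global flag, mark only the first vCPU; the
     * run loop drops out once and services any pending work. */
    atomic_store(&first_cpu->exit_request, true);

    for (ToyCPU *cpu = first_cpu; cpu != NULL; cpu = cpu->next) {
        printf("cpu %d: exit_request=%d\n", cpu->index,
               (int)atomic_load(&cpu->exit_request));
    }
    return 0;
}
```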
The second use is in the round-robin kick routine. The global
exit_request ensured every vCPU would set its local exit_request and
cause a full exit of the loop. Now that the iothread is no longer held
while running, we can rely on the kick handler to push us out as intended.
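
(Again purely illustrative, not QEMU code: a sketch of a kick that only targets whichever vCPU is currently published through an atomic pointer, modelled loosely on tcg_current_rr_cpu; all names are made up.)

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct ToyCPU {
    int index;
    atomic_bool exit_request;      /* per-vCPU flag */
} ToyCPU;

static ToyCPU cpus[2] = { { .index = 0 }, { .index = 1 } };
static _Atomic(ToyCPU *) current_rr_cpu;   /* vCPU now running on the host thread */

static void kick_rr_cpu(void)
{
    ToyCPU *cpu = atomic_load(&current_rr_cpu);
    if (cpu) {
        /* Push only the currently running vCPU out of its execution loop. */
        atomic_store(&cpu->exit_request, true);
    }
}

int main(void)
{
    atomic_store(&current_rr_cpu, &cpus[1]);   /* pretend vCPU 1 is running */
    kick_rr_cpu();

    for (int i = 0; i < 2; i++) {
        printf("cpu %d: exit_request=%d\n", cpus[i].index,
               (int)atomic_load(&cpus[i].exit_request));
    }
    return 0;
}
```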
We lightly re-factor the main vCPU thread so that cpu->exit_request
causes us to exit the main loop and process any IO requests that might
come along. As a cpu->exit_request may legitimately get squashed while
processing the EXCP_INTERRUPT exception, we also check
cpu->queued_work_first to ensure queued work is expedited as soon as
possible.
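
(One more illustrative sketch, not QEMU code: the loop condition and the conditional clearing of exit_request, with cpu->queued_work_first reduced to a simple flag; all types and names are invented.)

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct ToyCPU {
    int index;
    atomic_bool exit_request;
    atomic_bool has_queued_work;   /* stands in for cpu->queued_work_first */
    struct ToyCPU *next;
} ToyCPU;

static void run_one_slice(ToyCPU *cpu)
{
    printf("running guest code on cpu %d\n", cpu->index);
}

static void rr_schedule(ToyCPU *first_cpu)
{
    ToyCPU *cpu = first_cpu;

    /* Stop the round-robin walk as soon as the current vCPU has queued
     * work or a pending exit_request. */
    while (cpu && !atomic_load(&cpu->has_queued_work) &&
           !atomic_load(&cpu->exit_request)) {
        run_one_slice(cpu);
        cpu = cpu->next;
    }

    /* Clear the flag only on the vCPU that actually asked to stop. */
    if (cpu && atomic_load(&cpu->exit_request)) {
        atomic_store(&cpu->exit_request, false);
    }
}

int main(void)
{
    ToyCPU c1 = { .index = 1 };
    ToyCPU c0 = { .index = 0, .next = &c1 };

    atomic_store(&c1.exit_request, true);   /* e.g. set by a kick */
    rr_schedule(&c0);   /* runs cpu 0, then stops at cpu 1 and clears its flag */
    return 0;
}
```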
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | cpu-exec-common.c | 2 |
| -rw-r--r-- | cpu-exec.c | 20 |
| -rw-r--r-- | cpus.c | 19 |
| -rw-r--r-- | include/exec/exec-all.h | 3 |
4 files changed, 18 insertions, 26 deletions
diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index e2bc053372..0504a9457b 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -23,8 +23,6 @@
 #include "exec/exec-all.h"
 #include "exec/memory-internal.h"
 
-bool exit_request;
-
 /* exit the current TB, but without causing any exception to be raised */
 void cpu_loop_exit_noexc(CPUState *cpu)
 {
diff --git a/cpu-exec.c b/cpu-exec.c
index 1bd3d72002..85f14d4194 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -568,15 +568,13 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     *tb_exit = ret & TB_EXIT_MASK;
     switch (*tb_exit) {
     case TB_EXIT_REQUESTED:
-        /* Something asked us to stop executing
-         * chained TBs; just continue round the main
-         * loop. Whatever requested the exit will also
-         * have set something else (eg exit_request or
-         * interrupt_request) which we will handle
-         * next time around the loop. But we need to
-         * ensure the zeroing of tcg_exit_req (see cpu_tb_exec)
-         * comes before the next read of cpu->exit_request
-         * or cpu->interrupt_request.
+        /* Something asked us to stop executing chained TBs; just
+         * continue round the main loop. Whatever requested the exit
+         * will also have set something else (eg interrupt_request)
+         * which we will handle next time around the loop. But we
+         * need to ensure the tcg_exit_req read in generated code
+         * comes before the next read of cpu->exit_request or
+         * cpu->interrupt_request.
          */
         smp_mb();
         *last_tb = NULL;
@@ -630,10 +628,6 @@ int cpu_exec(CPUState *cpu)
 
     rcu_read_lock();
 
-    if (unlikely(atomic_mb_read(&exit_request))) {
-        cpu->exit_request = 1;
-    }
-
     cc->cpu_exec_enter(cpu);
 
     /* Calculate difference between guest clock and host clock.
diff --git a/cpus.c b/cpus.c
--- a/cpus.c
+++ b/cpus.c
@@ -793,7 +793,6 @@ static inline int64_t qemu_tcg_next_kick(void)
 static void qemu_cpu_kick_rr_cpu(void)
 {
     CPUState *cpu;
-    atomic_mb_set(&exit_request, 1);
     do {
         cpu = atomic_mb_read(&tcg_current_rr_cpu);
         if (cpu) {
@@ -1316,11 +1315,11 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     start_tcg_kick_timer();
 
-    /* process any pending work */
-    atomic_mb_set(&exit_request, 1);
-
     cpu = first_cpu;
 
+    /* process any pending work */
+    cpu->exit_request = 1;
+
     while (1) {
         /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
         qemu_account_warp_timer();
@@ -1329,7 +1328,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
             cpu = first_cpu;
         }
 
-        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
+
             atomic_mb_set(&tcg_current_rr_cpu, cpu);
 
             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -1349,12 +1349,15 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
                 break;
             }
 
-        } /* for cpu.. */
+            cpu = CPU_NEXT(cpu);
+        } /* while (cpu && !cpu->exit_request).. */
+
         /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
         atomic_set(&tcg_current_rr_cpu, NULL);
 
-        /* Pairs with smp_wmb in qemu_cpu_kick.  */
-        atomic_mb_set(&exit_request, 0);
+        if (cpu && cpu->exit_request) {
+            atomic_mb_set(&cpu->exit_request, 0);
+        }
 
         handle_icount_deadline();
 
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 4e34fc4cc1..82f0e12327 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -404,7 +404,4 @@ bool memory_region_is_unassigned(MemoryRegion *mr);
 /* vl.c */
 extern int singlestep;
 
-/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
-extern bool exit_request;
-
 #endif