author | Peter Maydell <peter.maydell@linaro.org> | 2015-11-06 11:31:40 +0000 |
---|---|---|
committer | Peter Maydell <peter.maydell@linaro.org> | 2015-11-06 11:31:40 +0000 |
commit | 9319738080faeb09876ce2017fcaea4937c475ee (patch) | |
tree | bf2b6e1c46b44d0e28fe4de89b034de7f2e8343d /cpu-exec.c | |
parent | 3aa88b31290c7cbbbae344efdece659194b95c70 (diff) | |
parent | ee312992a323530ea2cda8680f3a34746c72db8f (diff) | |
download | qemu-9319738080faeb09876ce2017fcaea4937c475ee.zip | |
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream-replay' into staging
So here it is, let's see what happens.
# gpg: Signature made Fri 06 Nov 2015 09:30:34 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
* remotes/bonzini/tags/for-upstream-replay:
replay: recording of the user input
replay: command line options
replay: replay blockers for devices
replay: initialization and deinitialization
replay: ptimer
bottom halves: introduce bh call function
replay: checkpoints
icount: improve counting for record/replay
replay: shutdown event
replay: recording and replaying clock ticks
replay: asynchronous events infrastructure
replay: interrupts and exceptions
cpu: replay instructions sequence
cpu-exec: allow temporary disabling icount
replay: introduce icount event
replay: introduce mutex to protect the replay log
replay: internal functions for replay log
replay: global variables and function stubs
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'cpu-exec.c')
-rw-r--r-- | cpu-exec.c | 55 |
1 file changed, 42 insertions, 13 deletions
diff --git a/cpu-exec.c b/cpu-exec.c
index 2cfb3d0ad9..c88d0ffdcd 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -30,6 +30,7 @@
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
 #include "hw/i386/apic.h"
 #endif
+#include "sysemu/replay.h"

 /* -icount align implementation. */

@@ -184,7 +185,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
 /* Execute the code without caching the generated code. An interpreter
    could be used if available. */
 static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
-                             TranslationBlock *orig_tb)
+                             TranslationBlock *orig_tb, bool ignore_icount)
 {
     TranslationBlock *tb;

@@ -194,7 +195,8 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
         max_cycles = CF_COUNT_MASK;

     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
-                     max_cycles | CF_NOCACHE);
+                     max_cycles | CF_NOCACHE
+                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
     tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
     cpu->current_tb = tb;
     /* execute the generated code */
@@ -345,21 +347,25 @@ int cpu_exec(CPUState *cpu)
     uintptr_t next_tb;
     SyncClocks sc;

+    /* replay_interrupt may need current_cpu */
+    current_cpu = cpu;
+
     if (cpu->halted) {
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
-        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
+            && replay_interrupt()) {
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
         }
 #endif
         if (!cpu_has_work(cpu)) {
+            current_cpu = NULL;
             return EXCP_HALTED;
         }

         cpu->halted = 0;
     }

-    current_cpu = cpu;
     atomic_mb_set(&tcg_current_cpu, cpu);
     rcu_read_lock();

@@ -401,10 +407,22 @@ int cpu_exec(CPUState *cpu)
                 cpu->exception_index = -1;
                 break;
 #else
-                cc->do_interrupt(cpu);
-                cpu->exception_index = -1;
+                if (replay_exception()) {
+                    cc->do_interrupt(cpu);
+                    cpu->exception_index = -1;
+                } else if (!replay_has_interrupt()) {
+                    /* give a chance to iothread in replay mode */
+                    ret = EXCP_INTERRUPT;
+                    break;
+                }
 #endif
             }
+        } else if (replay_has_exception()
+                   && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
+            /* try to cause an exception pending in the log */
+            cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
+            ret = -1;
+            break;
         }

         next_tb = 0; /* force lookup of first TB */
@@ -420,30 +438,40 @@ int cpu_exec(CPUState *cpu)
                     cpu->exception_index = EXCP_DEBUG;
                     cpu_loop_exit(cpu);
                 }
-                if (interrupt_request & CPU_INTERRUPT_HALT) {
+                if (replay_mode == REPLAY_MODE_PLAY
+                    && !replay_has_interrupt()) {
+                    /* Do nothing */
+                } else if (interrupt_request & CPU_INTERRUPT_HALT) {
+                    replay_interrupt();
                     cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                     cpu->halted = 1;
                     cpu->exception_index = EXCP_HLT;
                     cpu_loop_exit(cpu);
                 }
 #if defined(TARGET_I386)
-                if (interrupt_request & CPU_INTERRUPT_INIT) {
+                else if (interrupt_request & CPU_INTERRUPT_INIT) {
+                    replay_interrupt();
                     cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                     do_cpu_init(x86_cpu);
                     cpu->exception_index = EXCP_HALTED;
                     cpu_loop_exit(cpu);
                 }
 #else
-                if (interrupt_request & CPU_INTERRUPT_RESET) {
+                else if (interrupt_request & CPU_INTERRUPT_RESET) {
+                    replay_interrupt();
                     cpu_reset(cpu);
+                    cpu_loop_exit(cpu);
                 }
 #endif
                 /* The target hook has 3 exit conditions:
                    False when the interrupt isn't processed,
                    True when it is, and we should restart on a new TB,
                    and via longjmp via cpu_loop_exit.  */
-                if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
-                    next_tb = 0;
+                else {
+                    replay_interrupt();
+                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
+                        next_tb = 0;
+                    }
                 }
                 /* Don't use the cached interrupt_request value,
                    do_interrupt may have updated the EXITTB flag. */
@@ -454,7 +482,8 @@ int cpu_exec(CPUState *cpu)
                     next_tb = 0;
                 }
             }
-            if (unlikely(cpu->exit_request)) {
+            if (unlikely(cpu->exit_request
+                         || replay_has_interrupt())) {
                 cpu->exit_request = 0;
                 cpu->exception_index = EXCP_INTERRUPT;
                 cpu_loop_exit(cpu);
@@ -519,7 +548,7 @@ int cpu_exec(CPUState *cpu)
                 if (insns_left > 0) {
                     /* Execute remaining instructions. */
                     tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
-                    cpu_exec_nocache(cpu, insns_left, tb);
+                    cpu_exec_nocache(cpu, insns_left, tb, false);
                     align_clocks(&sc, cpu);
                 }
                 cpu->exception_index = EXCP_INTERRUPT;
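The series threads a small set of replay hooks (replay_interrupt(), replay_exception(), replay_has_interrupt()) through the interrupt path so that a recording run and a replaying run deliver interrupts at the same instruction. The following standalone C sketch illustrates only the gating pattern visible in the diff; it is not QEMU's implementation, and the log helpers (log_append_event(), log_has_event(), log_consume_event()) are hypothetical stand-ins for the replay-log API.

```c
/* Minimal sketch of the record/replay gate around interrupt delivery.
 * Assumption: a record run writes an "interrupt" event to a log, and a
 * replay run only delivers an interrupt when the log has one pending. */
#include <stdbool.h>
#include <stdio.h>

enum replay_mode { MODE_NONE, MODE_RECORD, MODE_PLAY };

static enum replay_mode mode = MODE_RECORD;
static bool log_pending_interrupt;  /* pretend replay-log state */

static void log_append_event(const char *ev) { printf("record: %s\n", ev); }
static bool log_has_event(void)              { return log_pending_interrupt; }
static void log_consume_event(void)          { log_pending_interrupt = false; }

/* Plays the role replay_interrupt() plays in the patched cpu_exec():
 * record mode logs the event, play mode consumes it from the log, and
 * the caller only delivers the interrupt when this returns true. */
static bool replay_interrupt_sketch(void)
{
    if (mode == MODE_RECORD) {
        log_append_event("interrupt");
        return true;
    }
    if (mode == MODE_PLAY) {
        if (!log_has_event()) {
            return false;   /* nothing logged at this point: skip delivery */
        }
        log_consume_event();
        return true;
    }
    return true;            /* normal (non-replay) execution */
}

static void deliver_interrupt(void) { printf("interrupt delivered\n"); }

int main(void)
{
    /* Same shape as the patched loop: hardware interrupt requests are
     * gated on the replay decision before they reach the CPU. */
    bool interrupt_request = true;

    if (interrupt_request && replay_interrupt_sketch()) {
        deliver_interrupt();
    }
    return 0;
}
```

The design point is the one the `replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()` branch in the diff makes: during replay an interrupt is delivered only when the log recorded one at the current execution point, which keeps the instruction streams of the two runs identical.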