author     Sergey Fedorov <serge.fdrv@gmail.com>    2016-04-22 19:08:47 +0300
committer  Richard Henderson <rth@twiddle.net>      2016-05-12 14:06:41 -1000
commit     0d07abf05e98903c7faf204a9a90f7d45b7554dc
tree       1090615eecc65e59f17b1f236b47495aaecf728e
parent     399f1648573d49b9411089e4e2ea62b6357b166e
tcg/i386: Make direct jump patching thread-safe
Ensure direct jump patching in i386 is atomic by:
 * naturally aligning the location of the direct jump address;
 * using atomic_read()/atomic_set() for code patching
   (see the sketch after this list).
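A minimal standalone sketch of the patching store (illustration only,
not part of the commit: patch_jmp_target() is a hypothetical name, and
the GCC builtin __atomic_store_n() stands in for QEMU's atomic_set()
macro):

#include <stdint.h>

/* jmp_addr points at the 4-byte displacement of an already emitted
 * "jmp rel32" (opcode 0xe9).  Because the patch keeps that
 * displacement naturally aligned, a single relaxed 32-bit store
 * replaces it without tearing: a concurrently executing thread sees
 * either the old or the new target, never a mix of both.
 */
static void patch_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
    /* rel32 is relative to the end of the displacement field */
    int32_t disp = (int32_t)(addr - (jmp_addr + 4));
    __atomic_store_n((int32_t *)jmp_addr, disp, __ATOMIC_RELAXED);
}

The natural alignment is what makes the relaxed store sufficient: an
unaligned 32-bit store may straddle a cache-line boundary, and another
vCPU thread executing the jump could then fetch a half-patched
displacement.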
tcg_out_nopn() implementation:
Suggested-by: Richard Henderson <rth@twiddle.net>.
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Message-Id: <1461341333-19646-6-git-send-email-sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
 include/exec/exec-all.h   |  2 +-
 tcg/i386/tcg-target.inc.c | 23 +++++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index d49befdba5..0ab7803259 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -313,7 +313,7 @@ void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
 static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
 {
     /* patch the branch destination */
-    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
+    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
     /* no need to flush icache explicitly */
 }
 #elif defined(__s390x__)
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 007407c3fc..8d242a6665 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1123,6 +1123,21 @@ static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
     tcg_out_branch(s, 0, dest);
 }
 
+static void tcg_out_nopn(TCGContext *s, int n)
+{
+    int i;
+    /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
+     * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
+     * duplicate prefix, and all of the interesting recent cores can
+     * decode and discard the duplicates in a single cycle.
+     */
+    tcg_debug_assert(n >= 1);
+    for (i = 1; i < n; ++i) {
+        tcg_out8(s, 0x66);
+    }
+    tcg_out8(s, 0x90);
+}
+
 #if defined(CONFIG_SOFTMMU)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
@@ -1777,6 +1792,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb:
         if (s->tb_jmp_offset) {
             /* direct jump method */
+            int gap;
+            /* jump displacement must be aligned for atomic patching;
+             * see if we need to add extra nops before jump
+             */
+            gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
+            if (gap != 1) {
+                tcg_out_nopn(s, gap - 1);
+            }
             tcg_out8(s, OPC_JMP_long); /* jmp im */
             s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
             tcg_out32(s, 0);
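For concreteness, here is a standalone sketch of the padding logic in
the INDEX_op_goto_tb hunk (illustration only: buf, code_ptr, out8(),
out_nopn() and emit_goto_tb() are hypothetical stand-ins for the TCG
output buffer, tcg_out8(), tcg_out_nopn(), tcg_pcrel_diff() and
QEMU_ALIGN_PTR_UP()):

#include <assert.h>
#include <stdint.h>

static uint8_t buf[64];
static uint8_t *code_ptr = buf;

static void out8(uint8_t v)
{
    *code_ptr++ = v;
}

/* One n-byte nop: n - 1 operand-size prefixes (0x66) in front of the
 * one-byte "xchg %eax,%eax" nop (0x90), as in tcg_out_nopn() above.
 */
static void out_nopn(int n)
{
    assert(n >= 1);
    for (int i = 1; i < n; ++i) {
        out8(0x66);
    }
    out8(0x90);
}

/* Mirrors the INDEX_op_goto_tb hunk: pad so that the rel32 field of
 * the "jmp rel32" lands on a 4-byte boundary.
 */
static void emit_goto_tb(void)
{
    /* The displacement starts one byte after the 0xe9 opcode; round
     * that address up to 4 bytes and fill the difference with a
     * single multi-byte nop.
     */
    uintptr_t disp = (uintptr_t)(code_ptr + 1);
    int gap = (int)(((disp + 3) & ~(uintptr_t)3) - (uintptr_t)code_ptr);
    if (gap != 1) {
        out_nopn(gap - 1);        /* 1 to 3 bytes of padding */
    }
    out8(0xe9);                   /* jmp rel32 (OPC_JMP_long) */
    for (int i = 0; i < 4; ++i) {
        out8(0x00);               /* displacement, patched later */
    }
}

Since align_up(code_ptr + 1, 4) - code_ptr is at most 4, the padding
is a single nop of 1 to 3 bytes, and the 4 displacement bytes emitted
by tcg_out32(s, 0) always start on a 4-byte boundary, which is exactly
what the atomic store in tb_set_jmp_target1() relies on.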