commit     e82d5a2460b0e176128027651ff9b104e4bdf5cc
tree       5e0cf9efe35adea7119254fdd5a4ff2df306d73e
parent     87d757d60d66d5ee1608460b0f1e07e2b758db9c
author     Emilio G. Cota <cota@braap.org>                   2017-07-16 15:13:52 -0400
committer  Richard Henderson <richard.henderson@linaro.org>  2017-10-24 13:53:42 -0700
tcg: check CF_PARALLEL instead of parallel_cpus
This decouples the resulting translated code from the current state of the
system, i.e. from whether the vCPUs happen to be running in parallel at
translation time.
The tb->cflags field is not passed to the TCG generation functions, so we
add a tb_cflags field to TCGContext and store a copy of tb->cflags there.
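For illustration only, a standalone sketch of the pattern (simplified
stand-in types and a made-up CF_PARALLEL value, not the actual QEMU
definitions): the generation-time decision reads the flags copied into the
context rather than the global parallel_cpus, so it depends only on the TB
being translated.

    #include <stdint.h>
    #include <stdio.h>

    #define CF_PARALLEL 0x8              /* stand-in value for the sketch */

    /* Simplified stand-ins for the QEMU structures involved. */
    typedef struct TranslationBlock { uint32_t cflags; } TranslationBlock;
    typedef struct TCGContext { uint32_t tb_cflags; } TCGContext;

    static TCGContext tcg_ctx;

    /* At translation start, mirror the TB's cflags into the TCG context
     * (what tb_gen_code() does after this patch). */
    static void begin_tb(TCGContext *s, const TranslationBlock *tb)
    {
        s->tb_cflags = tb->cflags;
    }

    /* An op-generation helper decides based on the copied flags, not on a
     * global, so the emitted code is tied to the TB's own cflags. */
    static void gen_mb(const TCGContext *s)
    {
        if (s->tb_cflags & CF_PARALLEL) {
            puts("emit memory barrier op");
        } else {
            puts("barrier elided for a non-parallel TB");
        }
    }

    int main(void)
    {
        TranslationBlock tb = { .cflags = CF_PARALLEL };
        begin_tb(&tcg_ctx, &tb);
        gen_mb(&tcg_ctx);
        return 0;
    }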
Most architectures have <= 32 registers, which makes TCGRegSet a 32-bit type
and leaves a 4-byte hole in TCGContext. Use this hole for the new field.
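A toy example of the layout argument (illustrative only; this is not the
real TCGContext, just two member names borrowed from the patch): on an LP64
host, a 4-byte register-set word followed by a pointer-sized member leaves 4
bytes of padding that an extra uint32_t can occupy without growing the
struct.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TCGRegSet;  /* 4 bytes when the host has <= 32 regs */

    struct before {
        TCGRegSet reserved_regs;        /* 4 bytes, then 4 bytes of padding */
        intptr_t current_frame_offset;  /* 8-byte member forces alignment */
    };

    struct after {
        TCGRegSet reserved_regs;
        uint32_t tb_cflags;             /* fills the hole; no size increase */
        intptr_t current_frame_offset;
    };

    int main(void)
    {
        /* On a typical 64-bit host both print 16. */
        printf("without tb_cflags: %zu bytes\n", sizeof(struct before));
        printf("with tb_cflags:    %zu bytes\n", sizeof(struct after));
        return 0;
    }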
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
 accel/tcg/translate-all.c |  1 +
 tcg/tcg-op.c              | 10 +++++-----
 tcg/tcg.h                 |  1 +
 3 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 91fd6e444b..dcd47cd692 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1296,6 +1296,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->flags = flags;
     tb->cflags = cflags;
     tb->trace_vcpu_dstate = *cpu->trace_dstate;
+    tcg_ctx.tb_cflags = cflags;
 
 #ifdef CONFIG_PROFILER
     tcg_ctx.tb_count1++; /* includes aborted translations because of
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 9561510d9c..8c7668de60 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -121,7 +121,7 @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
 
 void tcg_gen_mb(TCGBar mb_type)
 {
-    if (parallel_cpus) {
+    if (tcg_ctx.tb_cflags & CF_PARALLEL) {
         tcg_gen_op1(INDEX_op_mb, mb_type);
     }
 }
@@ -2780,7 +2780,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
 {
     memop = tcg_canonicalize_memop(memop, 0, 0);
 
-    if (!parallel_cpus) {
+    if (!(tcg_ctx.tb_cflags & CF_PARALLEL)) {
         TCGv_i32 t1 = tcg_temp_new_i32();
         TCGv_i32 t2 = tcg_temp_new_i32();
 
@@ -2824,7 +2824,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
 {
     memop = tcg_canonicalize_memop(memop, 1, 0);
 
-    if (!parallel_cpus) {
+    if (!(tcg_ctx.tb_cflags & CF_PARALLEL)) {
         TCGv_i64 t1 = tcg_temp_new_i64();
         TCGv_i64 t2 = tcg_temp_new_i64();
 
@@ -3001,7 +3001,7 @@ static void * const table_##NAME[16] = {                                \
 void tcg_gen_atomic_##NAME##_i32                                        \
     (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
 {                                                                       \
-    if (parallel_cpus) {                                                \
+    if (tcg_ctx.tb_cflags & CF_PARALLEL) {                              \
         do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
     } else {                                                            \
         do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
@@ -3011,7 +3011,7 @@ void tcg_gen_atomic_##NAME##_i32                                        \
 void tcg_gen_atomic_##NAME##_i64                                        \
     (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
 {                                                                       \
-    if (parallel_cpus) {                                                \
+    if (tcg_ctx.tb_cflags & CF_PARALLEL) {                              \
         do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
     } else {                                                            \
         do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
diff --git a/tcg/tcg.h b/tcg/tcg.h
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -614,6 +614,7 @@ struct TCGContext {
     uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
 
     TCGRegSet reserved_regs;
+    uint32_t tb_cflags; /* cflags of the current TB */
     intptr_t current_frame_offset;
     intptr_t frame_start;
     intptr_t frame_end;