author     Emilio G. Cota <cota@braap.org>                     2017-07-14 18:20:49 -0400
committer  Richard Henderson <richard.henderson@linaro.org>    2017-10-24 13:53:41 -0700
commit     2399d4e7cec22ecf1c51062d2ebfd45220dbaace
tree       45a5e53582ab2bedd830e70e191e6e2399311217  /target/arm/translate-a64.c
parent     c5a49c63fa26e8825ad101dfe86339ae4c216539
target/arm: check CF_PARALLEL instead of parallel_cpus
Thereby decoupling the resulting translated code from the current state of
the system.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
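To make the rationale concrete, here is a minimal, self-contained C sketch of the pattern; it is not taken from QEMU, and FakeTB, emit_yield_exit and parallel_cpus_global are hypothetical stand-ins. The translator captures the parallel flag in the block's own cflags and bakes the decision into the generated code, so flipping a global afterwards cannot silently change the behaviour of code that has already been translated and cached.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CF_PARALLEL (1u << 0)   /* illustrative stand-in for QEMU's flag bit */

/* Hypothetical stand-in for a cached TranslationBlock. */
typedef struct FakeTB {
    uint32_t cflags;         /* flags captured when the block was translated */
    bool emit_yield_exit;    /* decision baked into the "generated code"     */
} FakeTB;

/* Mutable global, analogous to the old parallel_cpus flag. */
static bool parallel_cpus_global;

/* Translation-time decision keyed off the TB's own cflags rather than the
 * global: once translated, the block's behaviour no longer depends on later
 * changes to parallel_cpus_global, so the cached block stays valid. */
static void translate(FakeTB *tb)
{
    tb->emit_yield_exit = !(tb->cflags & CF_PARALLEL);
}

int main(void)
{
    FakeTB tb = { .cflags = parallel_cpus_global ? CF_PARALLEL : 0 };

    translate(&tb);

    /* Flipping the global after translation does not change the cached block. */
    parallel_cpus_global = true;
    printf("block exits for YIELD: %s\n", tb.emit_yield_exit ? "yes" : "no");
    return 0;
}

This is the same property the patch relies on: tb_cflags(s->base.tb) is fixed for the lifetime of the TB, while parallel_cpus could change underneath it.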
Diffstat (limited to 'target/arm/translate-a64.c')
-rw-r--r--  target/arm/translate-a64.c  31
1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index e9bee8c196..f6b364c04b 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1336,13 +1336,18 @@ static void handle_hint(DisasContext *s, uint32_t insn,
     case 3: /* WFI */
         s->base.is_jmp = DISAS_WFI;
         return;
+        /* When running in MTTCG we don't generate jumps to the yield and
+         * WFE helpers as it won't affect the scheduling of other vCPUs.
+         * If we wanted to more completely model WFE/SEV so we don't busy
+         * spin unnecessarily we would need to do something more involved.
+         */
     case 1: /* YIELD */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             s->base.is_jmp = DISAS_YIELD;
         }
         return;
     case 2: /* WFE */
-        if (!parallel_cpus) {
+        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
             s->base.is_jmp = DISAS_WFE;
         }
         return;
@@ -1931,11 +1936,25 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                        MO_64 | MO_ALIGN | s->be_data);
             tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
         } else if (s->be_data == MO_LE) {
-            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
-                                           cpu_reg(s, rt), cpu_reg(s, rt2));
+            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
+                                                        cpu_exclusive_addr,
+                                                        cpu_reg(s, rt),
+                                                        cpu_reg(s, rt2));
+            } else {
+                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
+                                               cpu_reg(s, rt), cpu_reg(s, rt2));
+            }
         } else {
-            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
-                                           cpu_reg(s, rt), cpu_reg(s, rt2));
+            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
+                                                        cpu_exclusive_addr,
+                                                        cpu_reg(s, rt),
+                                                        cpu_reg(s, rt2));
+            } else {
+                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
+                                               cpu_reg(s, rt), cpu_reg(s, rt2));
+            }
         }
     } else {
         tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
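For context on the helper split in the second hunk, here is a hedged, self-contained C11 sketch of the underlying idea; cmpxchg64_serial, cmpxchg64_parallel, store_exclusive and the CF_PARALLEL value are illustrative stand-ins rather than QEMU's actual helpers. With MTTCG, other vCPU threads may write the same memory while the exclusive store is being checked, so the check-and-write must be a real atomic compare-and-swap, whereas the single-threaded round-robin mode can use a plain compare-then-store.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CF_PARALLEL (1u << 0)   /* illustrative stand-in for QEMU's flag bit */

/* Serial variant: a plain read-compare-write is race-free when only one vCPU
 * thread ever runs at a time (round-robin TCG). */
static bool cmpxchg64_serial(_Atomic uint64_t *addr, uint64_t cmp, uint64_t val)
{
    if (atomic_load_explicit(addr, memory_order_relaxed) != cmp) {
        return false;
    }
    atomic_store_explicit(addr, val, memory_order_relaxed);
    return true;
}

/* Parallel variant: a real atomic compare-and-swap, needed once other vCPU
 * threads (MTTCG) can touch the same location concurrently. */
static bool cmpxchg64_parallel(_Atomic uint64_t *addr, uint64_t cmp, uint64_t val)
{
    return atomic_compare_exchange_strong(addr, &cmp, val);
}

/* The variant is selected from flags captured at translation time, mirroring
 * the tb_cflags(s->base.tb) & CF_PARALLEL test in gen_store_exclusive(). */
static bool store_exclusive(uint32_t tb_cflags, _Atomic uint64_t *addr,
                            uint64_t cmp, uint64_t val)
{
    return (tb_cflags & CF_PARALLEL) ? cmpxchg64_parallel(addr, cmp, val)
                                     : cmpxchg64_serial(addr, cmp, val);
}

int main(void)
{
    _Atomic uint64_t mem = 42;

    /* Succeeds and stores 7: the expected value matches. */
    printf("swap ok: %d\n", store_exclusive(CF_PARALLEL, &mem, 42, 7));
    printf("mem:     %llu\n", (unsigned long long)atomic_load(&mem));
    return 0;
}

The dispatch in store_exclusive() mirrors the choice the patch adds to gen_store_exclusive(): which helper to call is decided when the block is translated, not each time it runs.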