path: root/accel/tcg/cputlb.c
author     Emilio G. Cota <cota@braap.org>                    2017-07-06 14:42:26 -0400
committer  Richard Henderson <richard.henderson@linaro.org>  2017-10-10 07:37:10 -0700
commit     83974cf4f8a46513f799ff0d7c7eb151acafda7b (patch)
tree       ce7c8add92d4cd60e832564b066be0c6a12c0e2b /accel/tcg/cputlb.c
parent     567d0a19c7998fa366598b83d5a6e5f0759d3ea9 (diff)
download   qemu-83974cf4f8a46513f799ff0d7c7eb151acafda7b.zip
cputlb: bring back tlb_flush_count under !TLB_DEBUG
Commit f0aff0f124 ("cputlb: add assert_cpu_is_self checks") buried the increment of tlb_flush_count under TLB_DEBUG. This results in "info jit" always (mis)reporting 0 TLB flushes when !TLB_DEBUG.

Besides, under MTTCG tlb_flush_count is updated by several threads, so in order not to lose counts we'd either have to use atomic ops or distribute the counter, which is more scalable.

This patch does the latter by embedding tlb_flush_count in CPUArchState. The global count is then easily obtained by iterating over the CPU list.

Note that this change also requires updating the accessors to tlb_flush_count to use atomic_read/set whenever there may be conflicting accesses (as defined in C11) to it.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
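The "distribute the counter" idea is the heart of the patch, so a minimal self-contained sketch of the pattern in plain C11 may help. QEMU's atomic_read()/atomic_set() macros are approximated here with <stdatomic.h>, and NUM_SLOTS, slot_count, slot_inc and total_count are illustrative names, not QEMU API:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NUM_SLOTS 4                    /* stand-in for the vCPU list */

    static _Atomic size_t slot_count[NUM_SLOTS];

    /* Each "vCPU" bumps only its own slot, so writers never contend. */
    static void slot_inc(int cpu)
    {
        size_t v = atomic_load_explicit(&slot_count[cpu], memory_order_relaxed);
        atomic_store_explicit(&slot_count[cpu], v + 1, memory_order_relaxed);
    }

    /* The global figure is recovered on demand by summing the slots,
     * mirroring the CPU_FOREACH loop in tlb_flush_count() below. */
    static size_t total_count(void)
    {
        size_t total = 0;
        for (int cpu = 0; cpu < NUM_SLOTS; cpu++) {
            total += atomic_load_explicit(&slot_count[cpu], memory_order_relaxed);
        }
        return total;
    }

    int main(void)
    {
        slot_inc(0);
        slot_inc(0);
        slot_inc(3);
        printf("flushes so far: %zu\n", total_count());   /* prints 3 */
        return 0;
    }

Note that the sum is a momentary snapshot rather than a linearizable total; that is acceptable for a statistic that "info jit" reports purely for information.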
Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r--  accel/tcg/cputlb.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index bcbcc4db6c..5b1ef1442c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -92,8 +92,18 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
     }
 }
 
-/* statistics */
-int tlb_flush_count;
+size_t tlb_flush_count(void)
+{
+    CPUState *cpu;
+    size_t count = 0;
+
+    CPU_FOREACH(cpu) {
+        CPUArchState *env = cpu->env_ptr;
+
+        count += atomic_read(&env->tlb_flush_count);
+    }
+    return count;
+}
 
 /* This is OK because CPU architectures generally permit an
  * implementation to drop entries from the TLB at any time, so
@@ -112,7 +122,8 @@ static void tlb_flush_nocheck(CPUState *cpu)
     }
 
     assert_cpu_is_self(cpu);
-    tlb_debug("(count: %d)\n", tlb_flush_count++);
+    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
+    tlb_debug("(count: %zu)\n", tlb_flush_count());
 
     tb_lock();
 
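A note on the accessor idiom in the second hunk: atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1) is a plain load followed by an atomic store, not an atomic read-modify-write. That is only correct because each vCPU thread is the sole writer of its own counter (which assert_cpu_is_self() guards); concurrent readers in tlb_flush_count() then only need atomic_read to avoid a C11 data race. A hedged C11 comparison of the two idioms, with an illustrative counter rather than the real field:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    static _Atomic size_t counter;   /* illustrative stand-in */

    /* Single-writer increment, as in the patch: the one thread that
     * writes `counter` may use a relaxed load + store. Cheap, but it
     * WOULD lose updates if a second writer raced it. */
    static void inc_single_writer(void)
    {
        size_t v = atomic_load_explicit(&counter, memory_order_relaxed);
        atomic_store_explicit(&counter, v + 1, memory_order_relaxed);
    }

    /* Multi-writer increment: an atomic RMW never loses counts, but
     * every increment contends on the same cache line -- the cost the
     * patch avoids by giving each vCPU its own counter instead. */
    static void inc_multi_writer(void)
    {
        atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
    }

    int main(void)
    {
        inc_single_writer();   /* safe: this thread is the only writer */
        inc_multi_writer();
        printf("counter = %zu\n", atomic_load(&counter));   /* prints 2 */
        return 0;
    }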