author     Emilio G. Cota <cota@braap.org>    2017-08-01 15:11:12 -0400
committer  Richard Henderson <richard.henderson@linaro.org>    2018-06-15 07:42:55 -1000
commit     128ed2278c4e6ad063f101c5dda7999b43f2d8a3
tree       aaca67bc2efb756b33cecc236da40344119e5f81    /tcg/tcg.c
parent     be2cdc5e352eb28b4ff631f053a261d91e6af78e
tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx
Thereby making it per-TCGContext. Once we remove tb_lock, this will avoid an atomic increment every time a TB is invalidated.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
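The writer side of this change lives outside tcg/tcg.c and is not shown in the diffstat below. A minimal sketch of how the per-context counter is expected to be bumped on invalidation, assuming the QEMU names of this era (the per-thread tcg_ctx pointer, atomic_set(), and the new tb_phys_invalidate_count field); placement in accel/tcg/translate-all.c is an assumption based on the commit message:

/* Sketch only, not part of this file's diff. Because each TCGContext now
 * owns its counter, the update can be a plain read-modify-write published
 * with atomic_set(); no cross-thread atomic increment is needed once
 * tb_lock is gone. The reader side sums the per-context values, as the
 * diff below shows. */
static inline void tb_phys_invalidate_count_inc(void)
{
    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
               tcg_ctx->tb_phys_invalidate_count + 1);
}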
Diffstat (limited to 'tcg/tcg.c')
-rw-r--r--  tcg/tcg.c  |  14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 62e3391020..1d1dfd7f7c 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -791,6 +791,20 @@ size_t tcg_code_capacity(void)
     return capacity;
 }
 
+size_t tcg_tb_phys_invalidate_count(void)
+{
+    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int i;
+    size_t total = 0;
+
+    for (i = 0; i < n_ctxs; i++) {
+        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+
+        total += atomic_read(&s->tb_phys_invalidate_count);
+    }
+    return total;
+}
+
 /* pool based memory allocation */
 void *tcg_malloc_internal(TCGContext *s, int size)
 {
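For illustration, a hypothetical consumer of the new accessor; the reporting function, its name, and the output format below are assumptions and not part of this commit:

#include <stdio.h>

/* Hypothetical caller: print the aggregate TB invalidation count gathered
 * by tcg_tb_phys_invalidate_count() across all TCG contexts. */
static void report_tb_invalidate_count(FILE *out)
{
    fprintf(out, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
}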