author     Aurelien Jarno <aurelien@aurel32.net>      2015-07-19 13:50:32 +0200
committer  Richard Henderson <rth@twiddle.net>        2015-09-02 14:24:10 -0700
commit     08b0b23be639b955e5e3d84dc42fa4e5ce6a8728
tree       e81115863262036672d3c2e2d45b1a74d6982b1c
parent     352bcb0a2b816ff9ab9d75d0f2384650d9e9ab19
tcg/i386: omit a few REXW prefixes in softmmu code
When computing the TLB address, the shr + and sequence masks out the high
32 bits of the guest address whenever TARGET_PAGE_BITS + CPU_TLB_BITS <= 32,
which is the common case. We can therefore use 32-bit instructions (no REX.W
prefix) for that computation, saving 2 bytes per TLB access (see the sketch
after the diffstat below).
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Message-Id: <1437306632-20655-1-git-send-email-aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
 tcg/i386/tcg-target.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
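To make the reasoning concrete before the diff, here is a minimal standalone
sketch (not QEMU code) of the address arithmetic tcg_out_tlb_load() emits.
The macro values are illustrative placeholders for the real target-dependent
definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's target-dependent macros. */
#define TARGET_PAGE_BITS    12                  /* 4 KiB pages */
#define CPU_TLB_BITS         8                  /* 256 TLB entries */
#define CPU_TLB_ENTRY_BITS   5                  /* 32-byte CPUTLBEntry */
#define CPU_TLB_SIZE        (1 << CPU_TLB_BITS)

/* The shr + and pair from the diff: guest address -> byte offset of
 * the matching entry in the TLB table. */
static uint64_t tlb_entry_offset(uint64_t addr)
{
    /* shr: shift the page number down, pre-scaled by the entry size. */
    uint64_t ofs = addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* and: keep CPU_TLB_BITS worth of index.  The surviving bits all
     * come from address bits [TARGET_PAGE_BITS,
     * TARGET_PAGE_BITS + CPU_TLB_BITS), so when that upper bound is
     * <= 32 the high half of the address never reaches the result and
     * 32-bit (REX.W-free) shr/and compute the same value. */
    return ofs & (uint64_t)((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
}

int main(void)
{
    /* Truncating to 32 bits first changes nothing here: 12 + 8 <= 32. */
    uint64_t addr = 0xdeadbeef12345678ull;
    printf("%#llx\n", (unsigned long long)tlb_entry_offset(addr));
    printf("%#llx\n", (unsigned long long)tlb_entry_offset((uint32_t)addr));
    return 0;
}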
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index d2adbc4d17..9187d34caf 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1178,8 +1178,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     const TCGReg r0 = TCG_REG_L0;
     const TCGReg r1 = TCG_REG_L1;
     TCGType ttype = TCG_TYPE_I32;
-    TCGType htype = TCG_TYPE_I32;
-    int trexw = 0, hrexw = 0;
+    TCGType tlbtype = TCG_TYPE_I32;
+    int trexw = 0, hrexw = 0, tlbrexw = 0;
     int s_mask = (1 << (opc & MO_SIZE)) - 1;
     bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
 
@@ -1189,12 +1189,15 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
             trexw = P_REXW;
         }
         if (TCG_TYPE_PTR == TCG_TYPE_I64) {
-            htype = TCG_TYPE_I64;
             hrexw = P_REXW;
+            if (TARGET_PAGE_BITS + CPU_TLB_BITS > 32) {
+                tlbtype = TCG_TYPE_I64;
+                tlbrexw = P_REXW;
+            }
         }
     }
 
-    tcg_out_mov(s, htype, r0, addrlo);
+    tcg_out_mov(s, tlbtype, r0, addrlo);
     if (aligned) {
         tcg_out_mov(s, ttype, r1, addrlo);
     } else {
@@ -1203,12 +1206,12 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
         tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
     }
 
-    tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
+    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
     tgen_arithi(s, ARITH_AND + trexw, r1,
                 TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
-    tgen_arithi(s, ARITH_AND + hrexw, r0,
+    tgen_arithi(s, ARITH_AND + tlbrexw, r0,
                 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
 
     tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
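The 2 bytes are exactly the REX.W prefixes (0x48 on low registers) dropped
from the shr and the and. Below is a hypothetical byte-level comparison,
using %rax as a stand-in operand and the shift count (7) and mask (0x1fe0)
the illustrative constants above produce; the immediate form TCG actually
picks may differ, but the delta is always the one prefix byte per
instruction:

#include <stdio.h>

static const unsigned char shr64[] = {0x48, 0xC1, 0xE8, 0x07};                   /* shr $7,%rax */
static const unsigned char shr32[] = {0xC1, 0xE8, 0x07};                         /* shr $7,%eax */
static const unsigned char and64[] = {0x48, 0x81, 0xE0, 0xE0, 0x1F, 0x00, 0x00}; /* and $0x1fe0,%rax */
static const unsigned char and32[] = {0x81, 0xE0, 0xE0, 0x1F, 0x00, 0x00};       /* and $0x1fe0,%eax */

int main(void)
{
    /* One REX byte saved on each of the two instructions. */
    printf("saved: %zu bytes per TLB access\n",
           (sizeof shr64 - sizeof shr32) + (sizeof and64 - sizeof and32));
    return 0;
}

Note that the final LEA in the diff still uses hrexw: it adds TCG_AREG0, the
64-bit host env pointer, so that address computation cannot be narrowed.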