author    | Thomas Huth <thuth@redhat.com>                   | 2017-09-11 18:33:24 -0300
committer | Richard Henderson <richard.henderson@linaro.org> | 2017-09-17 06:52:19 -0700
commit    | da1849c1eba50aa372f87c7945d7b230eb2b2fb2 (patch)
tree      | 74ffe0d7ed6dd62d062a58b530414e7cb4410447 /accel/tcg
parent    | ba026602a673677735428e64e621cdf95b5cd6d9 (diff)
download  | qemu-da1849c1eba50aa372f87c7945d7b230eb2b2fb2.zip
accel/tcg: move softmmu_template.h to accel/tcg/
The header is only used by accel/tcg/cputlb.c, so we can
move it to the accel/tcg/ folder, too.
Signed-off-by: Thomas Huth <thuth@redhat.com>
[PMD: reword commit title to match series]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20170911213328.9701-2-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'accel/tcg')
-rw-r--r-- | accel/tcg/softmmu_template.h | 433
1 file changed, 433 insertions, 0 deletions
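A note on usage before the patch body: the template has no include guard and #undefs its parameters at the end, so accel/tcg/cputlb.c instantiates it once per access size. Below is a minimal sketch of that consumer-side pattern; it is illustrative only, and the exact define list in QEMU's cputlb.c may differ:

```c
/* Illustrative sketch of the multiple-inclusion pattern used by
 * accel/tcg/cputlb.c; the exact defines there may differ.  Each
 * inclusion stamps out one family of load/store helpers via the
 * glue() macros in the template. */
#define MMUSUFFIX _mmu          /* suffix for the generated helper names */

#define DATA_SIZE 1             /* helper_ret_ldub_mmu, helper_ret_stb_mmu, ... */
#include "softmmu_template.h"   /* the template #undefs DATA_SIZE etc. at the end */

#define DATA_SIZE 2             /* helper_{le,be}_lduw_mmu, ... */
#include "softmmu_template.h"

#define DATA_SIZE 4             /* helper_{le,be}_ldul_mmu, ... */
#include "softmmu_template.h"

#define DATA_SIZE 8             /* helper_{le,be}_ldq_mmu, ... */
#include "softmmu_template.h"
```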
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
new file mode 100644
index 0000000000..d7563292a5
--- /dev/null
+++ b/accel/tcg/softmmu_template.h
@@ -0,0 +1,433 @@
+/*
+ * Software MMU support
+ *
+ * Generate helpers used by TCG for qemu_ld/st ops and code load
+ * functions.
+ *
+ * Included from target op helpers and exec.c.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define LSUFFIX q
+#define SDATA_TYPE  int64_t
+#define DATA_TYPE  uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define LSUFFIX l
+#define SDATA_TYPE  int32_t
+#define DATA_TYPE  uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define LSUFFIX uw
+#define SDATA_TYPE  int16_t
+#define DATA_TYPE  uint16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define LSUFFIX ub
+#define SDATA_TYPE  int8_t
+#define DATA_TYPE  uint8_t
+#else
+#error unsupported data size
+#endif
+
+
+/* For the benefit of TCG generated code, we want to avoid the complication
+   of ABI-specific return type promotion and always return a value extended
+   to the register size of the host.  This is tcg_target_long, except in the
+   case of a 32-bit host and 64-bit data, and for that we always have
+   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
+#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
+# define WORD_TYPE  DATA_TYPE
+# define USUFFIX    SUFFIX
+#else
+# define WORD_TYPE  tcg_target_ulong
+# define USUFFIX    glue(u, SUFFIX)
+# define SSUFFIX    glue(s, SUFFIX)
+#endif
+
+#ifdef SOFTMMU_CODE_ACCESS
+#define READ_ACCESS_TYPE MMU_INST_FETCH
+#define ADDR_READ addr_code
+#else
+#define READ_ACCESS_TYPE MMU_DATA_LOAD
+#define ADDR_READ addr_read
+#endif
+
+#if DATA_SIZE == 8
+# define BSWAP(X)  bswap64(X)
+#elif DATA_SIZE == 4
+# define BSWAP(X)  bswap32(X)
+#elif DATA_SIZE == 2
+# define BSWAP(X)  bswap16(X)
+#else
+# define BSWAP(X)  (X)
+#endif
+
+#if DATA_SIZE == 1
+# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name  helper_le_ld_name
+# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name helper_le_lds_name
+# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name  helper_le_st_name
+#else
+# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
+# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
+# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
+#endif
+
+#ifndef SOFTMMU_CODE_ACCESS
+static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
+                                              size_t mmu_idx, size_t index,
+                                              target_ulong addr,
+                                              uintptr_t retaddr)
+{
+    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE);
+}
+#endif
+
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    unsigned mmu_idx = get_mmuidx(oi);
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    uintptr_t haddr;
+    DATA_TYPE res;
+
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
+    }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                     mmu_idx, retaddr);
+        }
+        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
+        res = TGT_LE(res);
+        return res;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        target_ulong addr1, addr2;
+        DATA_TYPE res1, res2;
+        unsigned shift;
+    do_unaligned_access:
+        addr1 = addr & ~(DATA_SIZE - 1);
+        addr2 = addr1 + DATA_SIZE;
+        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
+        shift = (addr & (DATA_SIZE - 1)) * 8;
+
+        /* Little-endian combine.  */
+        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
+        return res;
+    }
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
+#else
+    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
+#endif
+    return res;
+}
+
+#if DATA_SIZE > 1
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    unsigned mmu_idx = get_mmuidx(oi);
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    uintptr_t haddr;
+    DATA_TYPE res;
+
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
+    }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                     mmu_idx, retaddr);
+        }
+        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
+        res = TGT_BE(res);
+        return res;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        target_ulong addr1, addr2;
+        DATA_TYPE res1, res2;
+        unsigned shift;
+    do_unaligned_access:
+        addr1 = addr & ~(DATA_SIZE - 1);
+        addr2 = addr1 + DATA_SIZE;
+        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
+        shift = (addr & (DATA_SIZE - 1)) * 8;
+
+        /* Big-endian combine.  */
+        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
+        return res;
+    }
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
+    return res;
+}
+#endif /* DATA_SIZE > 1 */
+
+#ifndef SOFTMMU_CODE_ACCESS
+
+/* Provide signed versions of the load routines as well.  We can of course
+   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
+#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
+WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
+                             TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
+}
+
+# if DATA_SIZE > 1
+WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
+                             TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
+}
+# endif
+#endif
+
+static inline void glue(io_write, SUFFIX)(CPUArchState *env,
+                                          size_t mmu_idx, size_t index,
+                                          DATA_TYPE val,
+                                          target_ulong addr,
+                                          uintptr_t retaddr)
+{
+    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE);
+}
+
+void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+                       TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    unsigned mmu_idx = get_mmuidx(oi);
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    uintptr_t haddr;
+
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        val = TGT_LE(val);
+        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        int i, index2;
+        target_ulong page2, tlb_addr2;
+    do_unaligned_access:
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first.  */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
+        /* XXX: not efficient, but simple.  */
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code in Windows 64-bit.  */
+        for (i = 0; i < DATA_SIZE; ++i) {
+            /* Little-endian extract.  */
+            uint8_t val8 = val >> (i * 8);
+            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                            oi, retaddr);
+        }
+        return;
+    }
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+#else
+    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
+#endif
+}
+
+#if DATA_SIZE > 1
+void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+                       TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    unsigned mmu_idx = get_mmuidx(oi);
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    uintptr_t haddr;
+
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        val = TGT_BE(val);
+        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        int i, index2;
+        target_ulong page2, tlb_addr2;
+    do_unaligned_access:
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first.  */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
+        /* XXX: not efficient, but simple */
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code.  */
+        for (i = 0; i < DATA_SIZE; ++i) {
+            /* Big-endian extract.  */
+            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
+            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                            oi, retaddr);
+        }
+        return;
+    }
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+}
+#endif /* DATA_SIZE > 1 */
+#endif /* !defined(SOFTMMU_CODE_ACCESS) */
+
+#undef READ_ACCESS_TYPE
+#undef DATA_TYPE
+#undef SUFFIX
+#undef LSUFFIX
+#undef DATA_SIZE
+#undef ADDR_READ
+#undef WORD_TYPE
+#undef SDATA_TYPE
+#undef USUFFIX
+#undef SSUFFIX
+#undef BSWAP
+#undef helper_le_ld_name
+#undef helper_be_ld_name
+#undef helper_le_lds_name
+#undef helper_be_lds_name
+#undef helper_le_st_name
+#undef helper_be_st_name
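A closing note on the cross-page slow path in the loads above: the access is split into two aligned loads and recombined with shifts. Here is a tiny self-contained illustration of the little-endian combine, using hypothetical byte values and DATA_SIZE == 4:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone illustration (hypothetical values) of the "little-endian
 * combine" in the unaligned load slow path, for DATA_SIZE == 4.
 * Imagine guest memory bytes 00 11 22 33 44 55 66 77 and an unaligned
 * 32-bit load at address 2: it is split into two aligned loads that
 * are then merged. */
int main(void)
{
    unsigned addr = 2;            /* addr & (DATA_SIZE - 1) != 0 */
    uint32_t res1 = 0x33221100;   /* aligned LE load at addr1 = addr & ~3 */
    uint32_t res2 = 0x77665544;   /* aligned LE load at addr2 = addr1 + 4 */
    unsigned shift = (addr & 3) * 8;   /* 16: bits of res1 below addr */

    /* Little-endian combine, as in the template.  (shift is never 0
       here: an aligned access cannot reach this path.) */
    uint32_t res = (res1 >> shift) | (res2 << ((4 * 8) - shift));

    assert(res == 0x55443322);    /* bytes 22 33 44 55, i.e. mem[2..5] */
    printf("0x%08x\n", res);
    return 0;
}
```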