path: root/include
author     Peter Maydell <peter.maydell@linaro.org>    2014-06-05 21:06:13 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2014-06-05 21:06:14 +0100
commit     31e25e3e5701607a2a88b5b6c5fb1057b20941fd (patch)
tree       757e835a8cef83a5e28f5e99704854836ca5df62 /include
parent     9d48d3f01cf3f67d54cd7e2c7834e97a57cea0b8 (diff)
parent     16b96f82cdfcb185560c2f8ebfc731711e2ccb2d (diff)
download   qemu-31e25e3e5701607a2a88b5b6c5fb1057b20941fd.zip
Merge remote-tracking branch 'remotes/bonzini/softmmu-smap' into staging
* remotes/bonzini/softmmu-smap: (33 commits)
  target-i386: cleanup x86_cpu_get_phys_page_debug
  target-i386: fix protection bits in the TLB for SMEP
  target-i386: support long addresses for 4MB pages (PSE-36)
  target-i386: raise page fault for reserved bits in large pages
  target-i386: unify reserved bits and NX bit check
  target-i386: simplify pte/vaddr calculation
  target-i386: raise page fault for reserved physical address bits
  target-i386: test reserved PS bit on PML4Es
  target-i386: set correct error code for reserved bit access
  target-i386: introduce support for 1 GB pages
  target-i386: introduce do_check_protect label
  target-i386: tweak handling of PG_NX_MASK
  target-i386: commonize checks for PAE and non-PAE
  target-i386: commonize checks for 4MB and 4KB pages
  target-i386: commonize checks for 2MB and 4KB pages
  target-i386: fix coding standards in x86_cpu_handle_mmu_fault
  target-i386: simplify SMAP handling in MMU_KSMAP_IDX
  target-i386: fix kernel accesses with SMAP and CPL = 3
  target-i386: move check_io helpers to seg_helper.c
  target-i386: rename KSMAP to KNOSMAP
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
 -rw-r--r--  include/exec/cpu-all.h                                                          | 119
 -rw-r--r--  include/exec/cpu_ldst.h                                                         | 400
 -rw-r--r--  include/exec/cpu_ldst_template.h (renamed from include/exec/softmmu_header.h)   |  32
 -rw-r--r--  include/exec/exec-all.h                                                         |  23
 -rw-r--r--  include/exec/softmmu_exec.h                                                     | 216
 -rw-r--r--  include/exec/softmmu_template.h                                                 | 525
 -rw-r--r--  include/qom/cpu.h                                                               |  15
 7 files changed, 419 insertions, 911 deletions
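
The diffstat shows the header reorganization this merge pulls in: the usermode and softmmu load/store accessors move out of cpu-all.h, exec-all.h and softmmu_exec.h into one new header, exec/cpu_ldst.h, with softmmu_header.h renamed to cpu_ldst_template.h. The matching target-side changes live outside include/ and are not in this diffstat; purely as an illustration (the file and helper names are hypothetical), a target op helper's includes change roughly like this:

    /* Hypothetical target-foo/mem_helper.c excerpt -- illustration only.
     *
     * Before this series the per-MMU-mode accessors came from:
     *     #include "exec/softmmu_exec.h"
     * After it, a single header provides cpu_ld*_data, cpu_ld*_kernel, ...:
     */
    #include "exec/cpu_ldst.h"

    uint32_t helper_load_example(CPUArchState *env, target_ulong addr)
    {
        /* The accessor names are unchanged; only the header providing them moved. */
        return cpu_ldl_data(env, addr);
    }
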
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 9cab592dc5..e8363d7248 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -198,127 +198,8 @@ extern unsigned long reserved_va;
#define RESERVED_VA 0ul
#endif
-/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
-
-#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
-#define h2g_valid(x) 1
-#else
-#define h2g_valid(x) ({ \
- unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
- (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
- (!RESERVED_VA || (__guest < RESERVED_VA)); \
-})
#endif
-#define h2g_nocheck(x) ({ \
- unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
- (abi_ulong)__ret; \
-})
-
-#define h2g(x) ({ \
- /* Check if given address fits target address space */ \
- assert(h2g_valid(x)); \
- h2g_nocheck(x); \
-})
-
-#define saddr(x) g2h(x)
-#define laddr(x) g2h(x)
-
-#else /* !CONFIG_USER_ONLY */
-/* NOTE: we use double casts if pointers and target_ulong have
- different sizes */
-#define saddr(x) (uint8_t *)(intptr_t)(x)
-#define laddr(x) (uint8_t *)(intptr_t)(x)
-#endif
-
-#define ldub_raw(p) ldub_p(laddr((p)))
-#define ldsb_raw(p) ldsb_p(laddr((p)))
-#define lduw_raw(p) lduw_p(laddr((p)))
-#define ldsw_raw(p) ldsw_p(laddr((p)))
-#define ldl_raw(p) ldl_p(laddr((p)))
-#define ldq_raw(p) ldq_p(laddr((p)))
-#define ldfl_raw(p) ldfl_p(laddr((p)))
-#define ldfq_raw(p) ldfq_p(laddr((p)))
-#define stb_raw(p, v) stb_p(saddr((p)), v)
-#define stw_raw(p, v) stw_p(saddr((p)), v)
-#define stl_raw(p, v) stl_p(saddr((p)), v)
-#define stq_raw(p, v) stq_p(saddr((p)), v)
-#define stfl_raw(p, v) stfl_p(saddr((p)), v)
-#define stfq_raw(p, v) stfq_p(saddr((p)), v)
-
-
-#if defined(CONFIG_USER_ONLY)
-
-/* if user mode, no other memory access functions */
-#define ldub(p) ldub_raw(p)
-#define ldsb(p) ldsb_raw(p)
-#define lduw(p) lduw_raw(p)
-#define ldsw(p) ldsw_raw(p)
-#define ldl(p) ldl_raw(p)
-#define ldq(p) ldq_raw(p)
-#define ldfl(p) ldfl_raw(p)
-#define ldfq(p) ldfq_raw(p)
-#define stb(p, v) stb_raw(p, v)
-#define stw(p, v) stw_raw(p, v)
-#define stl(p, v) stl_raw(p, v)
-#define stq(p, v) stq_raw(p, v)
-#define stfl(p, v) stfl_raw(p, v)
-#define stfq(p, v) stfq_raw(p, v)
-
-#define cpu_ldub_code(env1, p) ldub_raw(p)
-#define cpu_ldsb_code(env1, p) ldsb_raw(p)
-#define cpu_lduw_code(env1, p) lduw_raw(p)
-#define cpu_ldsw_code(env1, p) ldsw_raw(p)
-#define cpu_ldl_code(env1, p) ldl_raw(p)
-#define cpu_ldq_code(env1, p) ldq_raw(p)
-
-#define cpu_ldub_data(env, addr) ldub_raw(addr)
-#define cpu_lduw_data(env, addr) lduw_raw(addr)
-#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
-#define cpu_ldl_data(env, addr) ldl_raw(addr)
-#define cpu_ldq_data(env, addr) ldq_raw(addr)
-
-#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
-#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
-#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
-#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
-
-#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
-#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
-#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
-#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
-#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
-
-#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
-#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
-#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
-#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
-
-#define ldub_kernel(p) ldub_raw(p)
-#define ldsb_kernel(p) ldsb_raw(p)
-#define lduw_kernel(p) lduw_raw(p)
-#define ldsw_kernel(p) ldsw_raw(p)
-#define ldl_kernel(p) ldl_raw(p)
-#define ldq_kernel(p) ldq_raw(p)
-#define ldfl_kernel(p) ldfl_raw(p)
-#define ldfq_kernel(p) ldfq_raw(p)
-#define stb_kernel(p, v) stb_raw(p, v)
-#define stw_kernel(p, v) stw_raw(p, v)
-#define stl_kernel(p, v) stl_raw(p, v)
-#define stq_kernel(p, v) stq_raw(p, v)
-#define stfl_kernel(p, v) stfl_raw(p, v)
-#define stfq_kernel(p, vt) stfq_raw(p, v)
-
-#define cpu_ldub_data(env, addr) ldub_raw(addr)
-#define cpu_lduw_data(env, addr) lduw_raw(addr)
-#define cpu_ldl_data(env, addr) ldl_raw(addr)
-
-#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
-#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
-#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
-#endif /* defined(CONFIG_USER_ONLY) */
-
/* page related stuff */
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
new file mode 100644
index 0000000000..e5550e7175
--- /dev/null
+++ b/include/exec/cpu_ldst.h
@@ -0,0 +1,400 @@
+/*
+ * Software MMU support
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/*
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * MMU mode suffixes are defined in target cpu.h.
+ */
+#ifndef CPU_LDST_H
+#define CPU_LDST_H
+
+#if defined(CONFIG_USER_ONLY)
+/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define h2g_valid(x) 1
+#else
+#define h2g_valid(x) ({ \
+ unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+ (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
+ (!RESERVED_VA || (__guest < RESERVED_VA)); \
+})
+#endif
+
+#define h2g_nocheck(x) ({ \
+ unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+ (abi_ulong)__ret; \
+})
+
+#define h2g(x) ({ \
+ /* Check if given address fits target address space */ \
+ assert(h2g_valid(x)); \
+ h2g_nocheck(x); \
+})
+
+#define saddr(x) g2h(x)
+#define laddr(x) g2h(x)
+
+#else /* !CONFIG_USER_ONLY */
+/* NOTE: we use double casts if pointers and target_ulong have
+ different sizes */
+#define saddr(x) (uint8_t *)(intptr_t)(x)
+#define laddr(x) (uint8_t *)(intptr_t)(x)
+#endif
+
+#define ldub_raw(p) ldub_p(laddr((p)))
+#define ldsb_raw(p) ldsb_p(laddr((p)))
+#define lduw_raw(p) lduw_p(laddr((p)))
+#define ldsw_raw(p) ldsw_p(laddr((p)))
+#define ldl_raw(p) ldl_p(laddr((p)))
+#define ldq_raw(p) ldq_p(laddr((p)))
+#define ldfl_raw(p) ldfl_p(laddr((p)))
+#define ldfq_raw(p) ldfq_p(laddr((p)))
+#define stb_raw(p, v) stb_p(saddr((p)), v)
+#define stw_raw(p, v) stw_p(saddr((p)), v)
+#define stl_raw(p, v) stl_p(saddr((p)), v)
+#define stq_raw(p, v) stq_p(saddr((p)), v)
+#define stfl_raw(p, v) stfl_p(saddr((p)), v)
+#define stfq_raw(p, v) stfq_p(saddr((p)), v)
+
+
+#if defined(CONFIG_USER_ONLY)
+
+/* if user mode, no other memory access functions */
+#define ldub(p) ldub_raw(p)
+#define ldsb(p) ldsb_raw(p)
+#define lduw(p) lduw_raw(p)
+#define ldsw(p) ldsw_raw(p)
+#define ldl(p) ldl_raw(p)
+#define ldq(p) ldq_raw(p)
+#define ldfl(p) ldfl_raw(p)
+#define ldfq(p) ldfq_raw(p)
+#define stb(p, v) stb_raw(p, v)
+#define stw(p, v) stw_raw(p, v)
+#define stl(p, v) stl_raw(p, v)
+#define stq(p, v) stq_raw(p, v)
+#define stfl(p, v) stfl_raw(p, v)
+#define stfq(p, v) stfq_raw(p, v)
+
+#define cpu_ldub_code(env1, p) ldub_raw(p)
+#define cpu_ldsb_code(env1, p) ldsb_raw(p)
+#define cpu_lduw_code(env1, p) lduw_raw(p)
+#define cpu_ldsw_code(env1, p) ldsw_raw(p)
+#define cpu_ldl_code(env1, p) ldl_raw(p)
+#define cpu_ldq_code(env1, p) ldq_raw(p)
+
+#define cpu_ldub_data(env, addr) ldub_raw(addr)
+#define cpu_lduw_data(env, addr) lduw_raw(addr)
+#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
+#define cpu_ldl_data(env, addr) ldl_raw(addr)
+#define cpu_ldq_data(env, addr) ldq_raw(addr)
+
+#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
+
+#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
+#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
+#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
+#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
+#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
+
+#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
+
+#define ldub_kernel(p) ldub_raw(p)
+#define ldsb_kernel(p) ldsb_raw(p)
+#define lduw_kernel(p) lduw_raw(p)
+#define ldsw_kernel(p) ldsw_raw(p)
+#define ldl_kernel(p) ldl_raw(p)
+#define ldq_kernel(p) ldq_raw(p)
+#define ldfl_kernel(p) ldfl_raw(p)
+#define ldfq_kernel(p) ldfq_raw(p)
+#define stb_kernel(p, v) stb_raw(p, v)
+#define stw_kernel(p, v) stw_raw(p, v)
+#define stl_kernel(p, v) stl_raw(p, v)
+#define stq_kernel(p, v) stq_raw(p, v)
+#define stfl_kernel(p, v) stfl_raw(p, v)
+#define stfq_kernel(p, vt) stfq_raw(p, v)
+
+#define cpu_ldub_data(env, addr) ldub_raw(addr)
+#define cpu_lduw_data(env, addr) lduw_raw(addr)
+#define cpu_ldl_data(env, addr) ldl_raw(addr)
+
+#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
+
+#else
+
+/* XXX: find something cleaner.
+ * Furthermore, this is false for 64 bits targets
+ */
+#define ldul_user ldl_user
+#define ldul_kernel ldl_kernel
+#define ldul_hypv ldl_hypv
+#define ldul_executive ldl_executive
+#define ldul_supervisor ldl_supervisor
+
+/* The memory helpers for tcg-generated code need tcg_target_long etc. */
+#include "tcg.h"
+
+uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+
+void helper_stb_mmu(CPUArchState *env, target_ulong addr,
+ uint8_t val, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr,
+ uint16_t val, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t val, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t val, int mmu_idx);
+
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+
+#define CPU_MMU_INDEX 0
+#define MEMSUFFIX MMU_MODE0_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+
+#define CPU_MMU_INDEX 1
+#define MEMSUFFIX MMU_MODE1_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+
+#if (NB_MMU_MODES >= 3)
+
+#define CPU_MMU_INDEX 2
+#define MEMSUFFIX MMU_MODE2_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 3) */
+
+#if (NB_MMU_MODES >= 4)
+
+#define CPU_MMU_INDEX 3
+#define MEMSUFFIX MMU_MODE3_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 4) */
+
+#if (NB_MMU_MODES >= 5)
+
+#define CPU_MMU_INDEX 4
+#define MEMSUFFIX MMU_MODE4_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 5) */
+
+#if (NB_MMU_MODES >= 6)
+
+#define CPU_MMU_INDEX 5
+#define MEMSUFFIX MMU_MODE5_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 6) */
+
+#if (NB_MMU_MODES > 6)
+#error "NB_MMU_MODES > 6 is not supported for now"
+#endif /* (NB_MMU_MODES > 6) */
+
+/* these access are slower, they must be as rare as possible */
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MEMSUFFIX _data
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+
+#define ldub(p) ldub_data(p)
+#define ldsb(p) ldsb_data(p)
+#define lduw(p) lduw_data(p)
+#define ldsw(p) ldsw_data(p)
+#define ldl(p) ldl_data(p)
+#define ldq(p) ldq_data(p)
+
+#define stb(p, v) stb_data(p, v)
+#define stw(p, v) stw_data(p, v)
+#define stl(p, v) stl_data(p, v)
+#define stq(p, v) stq_data(p, v)
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MEMSUFFIX _code
+#define SOFTMMU_CODE_ACCESS
+
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#undef SOFTMMU_CODE_ACCESS
+
+/**
+ * tlb_vaddr_to_host:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index to use for lookup
+ *
+ * Look up the specified guest virtual index in the TCG softmmu TLB.
+ * If the TLB contains a host virtual address suitable for direct RAM
+ * access, then return it. Otherwise (TLB miss, TLB entry is for an
+ * I/O access, etc) return NULL.
+ *
+ * This is the equivalent of the initial fast-path code used by
+ * TCG backends for guest load and store accesses.
+ */
+static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
+ int access_type, int mmu_idx)
+{
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
+ target_ulong tlb_addr;
+ uintptr_t haddr;
+
+ switch (access_type) {
+ case 0:
+ tlb_addr = tlbentry->addr_read;
+ break;
+ case 1:
+ tlb_addr = tlbentry->addr_write;
+ break;
+ case 2:
+ tlb_addr = tlbentry->addr_code;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if ((addr & TARGET_PAGE_MASK)
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ /* TLB entry is for a different page */
+ return NULL;
+ }
+
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ return NULL;
+ }
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ return (void *)haddr;
+}
+
+#endif /* defined(CONFIG_USER_ONLY) */
+
+#endif /* CPU_LDST_H */
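
tlb_vaddr_to_host(), documented in the new header above, lets target helpers probe the softmmu TLB for a direct host pointer before falling back to the per-access slow path. A minimal usage sketch, assuming the usual QEMU target headers; the wrapper name copy_from_guest and the byte-wise fallback are illustrative, not part of this patch:

    /* Illustration only: fast-path a bulk read when the page is plain RAM.
     * Assumes addr..addr+len-1 does not cross a guest page boundary. */
    static void copy_from_guest(CPUArchState *env, void *dest,
                                target_ulong addr, size_t len, int mmu_idx)
    {
        /* access_type 0 == read; NULL means TLB miss or an I/O-backed page. */
        void *host = tlb_vaddr_to_host(env, addr, 0, mmu_idx);

        if (host) {
            memcpy(dest, host, len);            /* direct RAM access */
        } else {
            uint8_t *p = dest;
            size_t i;

            /* Slow path: may fault or go through I/O.  Note cpu_ldub_data
             * uses the CPU's current MMU index, not the mmu_idx argument. */
            for (i = 0; i < len; i++) {
                p[i] = cpu_ldub_data(env, addr + i);
            }
        }
    }
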
diff --git a/include/exec/softmmu_header.h b/include/exec/cpu_ldst_template.h
index d8d9c81b05..006093ac49 100644
--- a/include/exec/softmmu_header.h
+++ b/include/exec/cpu_ldst_template.h
@@ -8,7 +8,7 @@
* 32 and 64 bit cases, also generate floating point functions with
* the same size.
*
- * Not used directly but included from softmmu_exec.h and exec-all.h.
+ * Not used directly but included from cpu_ldst.h.
*
* Copyright (c) 2003 Fabrice Bellard
*
@@ -47,35 +47,18 @@
#error unsupported data size
#endif
-#if ACCESS_TYPE < (NB_MMU_MODES)
-
-#define CPU_MMU_INDEX ACCESS_TYPE
-#define MMUSUFFIX _mmu
-
-#elif ACCESS_TYPE == (NB_MMU_MODES)
-
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
-#define MMUSUFFIX _mmu
-
-#elif ACCESS_TYPE == (NB_MMU_MODES + 1)
-
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
-#define MMUSUFFIX _cmmu
-
-#else
-#error invalid ACCESS_TYPE
-#endif
-
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE uint32_t
#endif
-#if ACCESS_TYPE == (NB_MMU_MODES + 1)
+#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
+#define MMUSUFFIX _cmmu
#else
#define ADDR_READ addr_read
+#define MMUSUFFIX _mmu
#endif
/* generic load/store macros */
@@ -124,7 +107,7 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
}
#endif
-#if ACCESS_TYPE != (NB_MMU_MODES + 1)
+#ifndef SOFTMMU_CODE_ACCESS
/* generic store macro */
@@ -148,9 +131,7 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
}
}
-#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
-#if ACCESS_TYPE != (NB_MMU_MODES + 1)
#if DATA_SIZE == 8
static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
@@ -200,7 +181,7 @@ static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
}
#endif /* DATA_SIZE == 4 */
-#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
+#endif /* !SOFTMMU_CODE_ACCESS */
#undef RES_TYPE
#undef DATA_TYPE
@@ -208,6 +189,5 @@ static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
-#undef CPU_MMU_INDEX
#undef MMUSUFFIX
#undef ADDR_READ
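
Each inclusion of cpu_ldst_template.h stamps out inline accessors named after MEMSUFFIX and the data size. As a rough sketch of what one instantiation produces (CPU_MMU_INDEX 1, MEMSUFFIX commonly _kernel on x86, DATA_SIZE 4), with the inline TLB fast path that the real template generates omitted for brevity:

    /* Simplified sketch of one expansion -- not the literal generated code. */
    static inline uint32_t cpu_ldl_kernel(CPUArchState *env, target_ulong ptr)
    {
        /* The real code first checks env->tlb_table[1][...]; on a hit it loads
         * directly from host memory, otherwise it calls the softmmu helper: */
        return helper_ldl_mmu(env, ptr, 1 /* CPU_MMU_INDEX */);
    }

    static inline void cpu_stl_kernel(CPUArchState *env, target_ulong ptr,
                                      uint32_t v)
    {
        helper_stl_mmu(env, ptr, v, 1 /* CPU_MMU_INDEX */);
    }
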
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index c964ca4f0b..3d62d9c464 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -344,29 +344,6 @@ bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
-uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-#define ACCESS_TYPE (NB_MMU_MODES + 1)
-#define MEMSUFFIX _code
-
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
#endif
#if defined(CONFIG_USER_ONLY)
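
The block removed from exec-all.h above generated the _code accessors; they are now produced by cpu_ldst.h, with SOFTMMU_CODE_ACCESS selecting the addr_code/_cmmu path. Their use is unchanged; a typical, illustrative call site in a translator (the wrapper name is hypothetical):

    /* Illustration: fetching a 32-bit guest instruction during translation. */
    static inline uint32_t fetch_insn32(CPUArchState *env, target_ulong pc)
    {
        return cpu_ldl_code(env, pc);   /* uses addr_code and the _cmmu helpers */
    }
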
diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h
deleted file mode 100644
index 470db20174..0000000000
--- a/include/exec/softmmu_exec.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Software MMU support
- *
- * Generate inline load/store functions for all MMU modes (typically
- * at least _user and _kernel) as well as _data versions, for all data
- * sizes.
- *
- * Used by target op helpers.
- *
- * MMU mode suffixes are defined in target cpu.h.
- */
-
-/* XXX: find something cleaner.
- * Furthermore, this is false for 64 bits targets
- */
-#define ldul_user ldl_user
-#define ldul_kernel ldl_kernel
-#define ldul_hypv ldl_hypv
-#define ldul_executive ldl_executive
-#define ldul_supervisor ldl_supervisor
-
-/* The memory helpers for tcg-generated code need tcg_target_long etc. */
-#include "tcg.h"
-
-#define ACCESS_TYPE 0
-#define MEMSUFFIX MMU_MODE0_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#define ACCESS_TYPE 1
-#define MEMSUFFIX MMU_MODE1_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#if (NB_MMU_MODES >= 3)
-
-#define ACCESS_TYPE 2
-#define MEMSUFFIX MMU_MODE2_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 3) */
-
-#if (NB_MMU_MODES >= 4)
-
-#define ACCESS_TYPE 3
-#define MEMSUFFIX MMU_MODE3_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 4) */
-
-#if (NB_MMU_MODES >= 5)
-
-#define ACCESS_TYPE 4
-#define MEMSUFFIX MMU_MODE4_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 5) */
-
-#if (NB_MMU_MODES >= 6)
-
-#define ACCESS_TYPE 5
-#define MEMSUFFIX MMU_MODE5_SUFFIX
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-#endif /* (NB_MMU_MODES >= 6) */
-
-#if (NB_MMU_MODES > 6)
-#error "NB_MMU_MODES > 6 is not supported for now"
-#endif /* (NB_MMU_MODES > 6) */
-
-/* these access are slower, they must be as rare as possible */
-#define ACCESS_TYPE (NB_MMU_MODES)
-#define MEMSUFFIX _data
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
-#define ldub(p) ldub_data(p)
-#define ldsb(p) ldsb_data(p)
-#define lduw(p) lduw_data(p)
-#define ldsw(p) ldsw_data(p)
-#define ldl(p) ldl_data(p)
-#define ldq(p) ldq_data(p)
-
-#define stb(p, v) stb_data(p, v)
-#define stw(p, v) stw_data(p, v)
-#define stl(p, v) stl_data(p, v)
-#define stq(p, v) stq_data(p, v)
-
-/**
- * tlb_vaddr_to_host:
- * @env: CPUArchState
- * @addr: guest virtual address to look up
- * @access_type: 0 for read, 1 for write, 2 for execute
- * @mmu_idx: MMU index to use for lookup
- *
- * Look up the specified guest virtual index in the TCG softmmu TLB.
- * If the TLB contains a host virtual address suitable for direct RAM
- * access, then return it. Otherwise (TLB miss, TLB entry is for an
- * I/O access, etc) return NULL.
- *
- * This is the equivalent of the initial fast-path code used by
- * TCG backends for guest load and store accesses.
- */
-static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
- int access_type, int mmu_idx)
-{
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
- target_ulong tlb_addr;
- uintptr_t haddr;
-
- switch (access_type) {
- case 0:
- tlb_addr = tlbentry->addr_read;
- break;
- case 1:
- tlb_addr = tlbentry->addr_write;
- break;
- case 2:
- tlb_addr = tlbentry->addr_code;
- break;
- default:
- g_assert_not_reached();
- }
-
- if ((addr & TARGET_PAGE_MASK)
- != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- /* TLB entry is for a different page */
- return NULL;
- }
-
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- return NULL;
- }
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
- return (void *)haddr;
-}
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
deleted file mode 100644
index 73ed7cf921..0000000000
--- a/include/exec/softmmu_template.h
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- * Software MMU support
- *
- * Generate helpers used by TCG for qemu_ld/st ops and code load
- * functions.
- *
- * Included from target op helpers and exec.c.
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/timer.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-
-#define DATA_SIZE (1 << SHIFT)
-
-#if DATA_SIZE == 8
-#define SUFFIX q
-#define LSUFFIX q
-#define SDATA_TYPE int64_t
-#define DATA_TYPE uint64_t
-#elif DATA_SIZE == 4
-#define SUFFIX l
-#define LSUFFIX l
-#define SDATA_TYPE int32_t
-#define DATA_TYPE uint32_t
-#elif DATA_SIZE == 2
-#define SUFFIX w
-#define LSUFFIX uw
-#define SDATA_TYPE int16_t
-#define DATA_TYPE uint16_t
-#elif DATA_SIZE == 1
-#define SUFFIX b
-#define LSUFFIX ub
-#define SDATA_TYPE int8_t
-#define DATA_TYPE uint8_t
-#else
-#error unsupported data size
-#endif
-
-
-/* For the benefit of TCG generated code, we want to avoid the complication
- of ABI-specific return type promotion and always return a value extended
- to the register size of the host. This is tcg_target_long, except in the
- case of a 32-bit host and 64-bit data, and for that we always have
- uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
-#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
-# define WORD_TYPE DATA_TYPE
-# define USUFFIX SUFFIX
-#else
-# define WORD_TYPE tcg_target_ulong
-# define USUFFIX glue(u, SUFFIX)
-# define SSUFFIX glue(s, SUFFIX)
-#endif
-
-#ifdef SOFTMMU_CODE_ACCESS
-#define READ_ACCESS_TYPE 2
-#define ADDR_READ addr_code
-#else
-#define READ_ACCESS_TYPE 0
-#define ADDR_READ addr_read
-#endif
-
-#if DATA_SIZE == 8
-# define BSWAP(X) bswap64(X)
-#elif DATA_SIZE == 4
-# define BSWAP(X) bswap32(X)
-#elif DATA_SIZE == 2
-# define BSWAP(X) bswap16(X)
-#else
-# define BSWAP(X) (X)
-#endif
-
-#ifdef TARGET_WORDS_BIGENDIAN
-# define TGT_BE(X) (X)
-# define TGT_LE(X) BSWAP(X)
-#else
-# define TGT_BE(X) BSWAP(X)
-# define TGT_LE(X) (X)
-#endif
-
-#if DATA_SIZE == 1
-# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-# define helper_be_ld_name helper_le_ld_name
-# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
-# define helper_be_lds_name helper_le_lds_name
-# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
-# define helper_be_st_name helper_le_st_name
-#else
-# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
-# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
-# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
-# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
-# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
-# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
-#endif
-
-#ifdef TARGET_WORDS_BIGENDIAN
-# define helper_te_ld_name helper_be_ld_name
-# define helper_te_st_name helper_be_st_name
-#else
-# define helper_te_ld_name helper_le_ld_name
-# define helper_te_st_name helper_le_st_name
-#endif
-
-static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
- hwaddr physaddr,
- target_ulong addr,
- uintptr_t retaddr)
-{
- uint64_t val;
- CPUState *cpu = ENV_GET_CPU(env);
- MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
-
- physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- cpu->mem_io_pc = retaddr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
- cpu_io_recompile(cpu, retaddr);
- }
-
- cpu->mem_io_vaddr = addr;
- io_mem_read(mr, physaddr, &val, 1 << SHIFT);
- return val;
-}
-
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
-WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
-{
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- uintptr_t haddr;
- DATA_TYPE res;
-
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
- /* If the TLB entry is for a different page, reload and try again. */
- if ((addr & TARGET_PAGE_MASK)
- != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- }
-#endif
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
- ioaddr = env->iotlb[mmu_idx][index];
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
- res = TGT_LE(res);
- return res;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- target_ulong addr1, addr2;
- DATA_TYPE res1, res2;
- unsigned shift;
- do_unaligned_access:
-#ifdef ALIGNED_ONLY
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
-#endif
- addr1 = addr & ~(DATA_SIZE - 1);
- addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
- res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
- shift = (addr & (DATA_SIZE - 1)) * 8;
-
- /* Little-endian combine. */
- res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
- return res;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- }
-#endif
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
-#if DATA_SIZE == 1
- res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
-#else
- res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
-#endif
- return res;
-}
-
-#if DATA_SIZE > 1
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
-WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
-{
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- uintptr_t haddr;
- DATA_TYPE res;
-
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
- /* If the TLB entry is for a different page, reload and try again. */
- if ((addr & TARGET_PAGE_MASK)
- != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- }
-#endif
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
- ioaddr = env->iotlb[mmu_idx][index];
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
- res = TGT_BE(res);
- return res;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- target_ulong addr1, addr2;
- DATA_TYPE res1, res2;
- unsigned shift;
- do_unaligned_access:
-#ifdef ALIGNED_ONLY
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
-#endif
- addr1 = addr & ~(DATA_SIZE - 1);
- addr2 = addr1 + DATA_SIZE;
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
- res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
- shift = (addr & (DATA_SIZE - 1)) * 8;
-
- /* Big-endian combine. */
- res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
- return res;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
- }
-#endif
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
- res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
- return res;
-}
-#endif /* DATA_SIZE > 1 */
-
-DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- int mmu_idx)
-{
- return helper_te_ld_name (env, addr, mmu_idx, GETRA());
-}
-
-#ifndef SOFTMMU_CODE_ACCESS
-
-/* Provide signed versions of the load routines as well. We can of course
- avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
-#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
-WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr)
-{
- return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
-}
-
-# if DATA_SIZE > 1
-WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr)
-{
- return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
-}
-# endif
-#endif
-
-static inline void glue(io_write, SUFFIX)(CPUArchState *env,
- hwaddr physaddr,
- DATA_TYPE val,
- target_ulong addr,
- uintptr_t retaddr)
-{
- CPUState *cpu = ENV_GET_CPU(env);
- MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
-
- physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
- cpu_io_recompile(cpu, retaddr);
- }
-
- cpu->mem_io_vaddr = addr;
- cpu->mem_io_pc = retaddr;
- io_mem_write(mr, physaddr, val, 1 << SHIFT);
-}
-
-void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- int mmu_idx, uintptr_t retaddr)
-{
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- uintptr_t haddr;
-
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
- /* If the TLB entry is for a different page, reload and try again. */
- if ((addr & TARGET_PAGE_MASK)
- != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
- }
-#endif
- tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
- ioaddr = env->iotlb[mmu_idx][index];
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- val = TGT_LE(val);
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
- return;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- int i;
- do_unaligned_access:
-#ifdef ALIGNED_ONLY
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
-#endif
- /* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for (i = DATA_SIZE - 1; i >= 0; i--) {
- /* Little-endian extract. */
- uint8_t val8 = val >> (i * 8);
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr + GETPC_ADJ);
- }
- return;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
- }
-#endif
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
-#if DATA_SIZE == 1
- glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-#else
- glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-#endif
-}
-
-#if DATA_SIZE > 1
-void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- int mmu_idx, uintptr_t retaddr)
-{
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- uintptr_t haddr;
-
- /* Adjust the given return address. */
- retaddr -= GETPC_ADJ;
-
- /* If the TLB entry is for a different page, reload and try again. */
- if ((addr & TARGET_PAGE_MASK)
- != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
- }
-#endif
- tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- }
-
- /* Handle an IO access. */
- if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- hwaddr ioaddr;
- if ((addr & (DATA_SIZE - 1)) != 0) {
- goto do_unaligned_access;
- }
- ioaddr = env->iotlb[mmu_idx][index];
-
- /* ??? Note that the io helpers always read data in the target
- byte ordering. We should push the LE/BE request down into io. */
- val = TGT_BE(val);
- glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
- return;
- }
-
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- int i;
- do_unaligned_access:
-#ifdef ALIGNED_ONLY
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
-#endif
- /* XXX: not efficient, but simple */
- /* Note: relies on the fact that tlb_fill() does not remove the
- * previous page from the TLB cache. */
- for (i = DATA_SIZE - 1; i >= 0; i--) {
- /* Big-endian extract. */
- uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
- /* Note the adjustment at the beginning of the function.
- Undo that for the recursion. */
- glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr + GETPC_ADJ);
- }
- return;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
- do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
- }
-#endif
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
-}
-#endif /* DATA_SIZE > 1 */
-
-void
-glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
- DATA_TYPE val, int mmu_idx)
-{
- helper_te_st_name(env, addr, val, mmu_idx, GETRA());
-}
-
-#endif /* !defined(SOFTMMU_CODE_ACCESS) */
-
-#undef READ_ACCESS_TYPE
-#undef SHIFT
-#undef DATA_TYPE
-#undef SUFFIX
-#undef LSUFFIX
-#undef DATA_SIZE
-#undef ADDR_READ
-#undef WORD_TYPE
-#undef SDATA_TYPE
-#undef USUFFIX
-#undef SSUFFIX
-#undef BSWAP
-#undef TGT_BE
-#undef TGT_LE
-#undef CPU_BE
-#undef CPU_LE
-#undef helper_le_ld_name
-#undef helper_be_ld_name
-#undef helper_le_lds_name
-#undef helper_be_lds_name
-#undef helper_le_st_name
-#undef helper_be_st_name
-#undef helper_te_ld_name
-#undef helper_te_st_name
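
softmmu_template.h generated the out-of-line helpers that TCG-generated code calls for qemu_ld/qemu_st; since this diffstat is limited to include/, only its removal is visible here. For reference, the name macros it defines expand, for SHIFT == 2 (32-bit data) on a little-endian data access, to helper_le_ldul_mmu, helper_le_ldsl_mmu and helper_le_stl_mmu, so a TCG backend's slow path ends up making calls equivalent to the sketch below (variable names are made up):

    /* Illustration: calls matching the helper signatures declared above,
     * for a 32-bit little-endian guest access. */
    tcg_target_ulong val = helper_le_ldul_mmu(env, guest_addr, mmu_idx, retaddr);
    helper_le_stl_mmu(env, guest_addr, (uint32_t)val, mmu_idx, retaddr);
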
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index df977c88f0..4b352a28fa 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -80,6 +80,8 @@ struct TranslationBlock;
* @has_work: Callback for checking if there is work to do.
* @do_interrupt: Callback for interrupt handling.
* @do_unassigned_access: Callback for unassigned access handling.
+ * @do_unaligned_access: Callback for unaligned access handling, if
+ * the target defines #ALIGNED_ONLY.
* @memory_rw_debug: Callback for GDB memory access.
* @dump_state: Callback for dumping state.
* @dump_statistics: Callback for dumping statistics.
@@ -112,6 +114,8 @@ typedef struct CPUClass {
bool (*has_work)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu);
CPUUnassignedAccess do_unassigned_access;
+ void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+ int is_write, int is_user, uintptr_t retaddr);
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
uint8_t *buf, int len, bool is_write);
void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
@@ -544,8 +548,7 @@ void cpu_interrupt(CPUState *cpu, int mask);
#endif /* USER_ONLY */
-#ifndef CONFIG_USER_ONLY
-
+#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
bool is_write, bool is_exec,
int opaque, unsigned size)
@@ -557,6 +560,14 @@ static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
}
}
+static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
+ int is_write, int is_user,
+ uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
+}
#endif
/**
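
The qom/cpu.h hunks add a do_unaligned_access hook to CPUClass plus the cpu_unaligned_access() wrapper shown above, so targets that define ALIGNED_ONLY can report unaligned accesses through QOM rather than a target-local function. A sketch of how a target might wire it up (the foo_cpu names are hypothetical):

    /* Illustration only: hooking the new callback from a target's class_init. */
    static void foo_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                            int is_write, int is_user,
                                            uintptr_t retaddr)
    {
        /* Typically: record the faulting address in the CPU state and raise
         * the target's alignment exception, using retaddr to unwind. */
    }

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

        cc->do_unaligned_access = foo_cpu_do_unaligned_access;
    }
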