From 859d76120b87598bed0ba0757f62327cc8834332 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 11:15:30 +0100 Subject: softmmu: start introducing SOFTMMU_CODE_ACCESS in softmmu_header.h This preprocessor symbol is already used in softmmu_template.h. We will use it to distinguish the two "fake" ACCESS_TYPEs NB_MMU_MODES and NB_MMU_MODES + 1. Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- include/exec/exec-all.h | 2 ++ include/exec/softmmu_header.h | 8 +++----- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index c964ca4f0b..8223fbf246 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -351,6 +351,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); #define ACCESS_TYPE (NB_MMU_MODES + 1) #define MEMSUFFIX _code +#define SOFTMMU_CODE_ACCESS #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -366,6 +367,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); #undef ACCESS_TYPE #undef MEMSUFFIX +#undef SOFTMMU_CODE_ACCESS #endif diff --git a/include/exec/softmmu_header.h b/include/exec/softmmu_header.h index d8d9c81b05..087b2d4e1e 100644 --- a/include/exec/softmmu_header.h +++ b/include/exec/softmmu_header.h @@ -72,7 +72,7 @@ #define RES_TYPE uint32_t #endif -#if ACCESS_TYPE == (NB_MMU_MODES + 1) +#ifdef SOFTMMU_CODE_ACCESS #define ADDR_READ addr_code #else #define ADDR_READ addr_read @@ -124,7 +124,7 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) } #endif -#if ACCESS_TYPE != (NB_MMU_MODES + 1) +#ifndef SOFTMMU_CODE_ACCESS /* generic store macro */ @@ -148,9 +148,7 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr, } } -#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */ -#if ACCESS_TYPE != (NB_MMU_MODES + 1) #if DATA_SIZE == 8 static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env, @@ -200,7 +198,7 @@ static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env, } #endif /* DATA_SIZE == 4 */ -#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */ +#endif /* !SOFTMMU_CODE_ACCESS */ #undef RES_TYPE #undef DATA_TYPE -- cgit v1.2.3 From a6c9eac0d57d29bce82200175f9a4ef03c2c6bca Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 11:18:14 +0100 Subject: softmmu: move MMUSUFFIX under SOFTMMU_CODE_ACCESS Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- include/exec/softmmu_header.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/exec/softmmu_header.h b/include/exec/softmmu_header.h index 087b2d4e1e..cc85a43685 100644 --- a/include/exec/softmmu_header.h +++ b/include/exec/softmmu_header.h @@ -48,20 +48,11 @@ #endif #if ACCESS_TYPE < (NB_MMU_MODES) - #define CPU_MMU_INDEX ACCESS_TYPE -#define MMUSUFFIX _mmu - #elif ACCESS_TYPE == (NB_MMU_MODES) - #define CPU_MMU_INDEX (cpu_mmu_index(env)) -#define MMUSUFFIX _mmu - #elif ACCESS_TYPE == (NB_MMU_MODES + 1) - #define CPU_MMU_INDEX (cpu_mmu_index(env)) -#define MMUSUFFIX _cmmu - #else #error invalid ACCESS_TYPE #endif @@ -74,8 +65,10 @@ #ifdef SOFTMMU_CODE_ACCESS #define ADDR_READ addr_code +#define MMUSUFFIX _cmmu #else #define ADDR_READ addr_read +#define MMUSUFFIX _mmu #endif /* generic load/store macros */ -- cgit v1.2.3 From ca0aa408167888d862df3e7f734f6b7b35bd556d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 11:19:35 +0100 Subject: softmmu: move definition of CPU_MMU_INDEX to 
inclusion site, drop ACCESS_TYPE Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- include/exec/exec-all.h | 4 ++-- include/exec/softmmu_exec.h | 28 ++++++++++++++-------------- include/exec/softmmu_header.h | 11 ----------- 3 files changed, 16 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 8223fbf246..ef72027dd8 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -349,7 +349,7 @@ uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -#define ACCESS_TYPE (NB_MMU_MODES + 1) +#define CPU_MMU_INDEX (cpu_mmu_index(env)) #define MEMSUFFIX _code #define SOFTMMU_CODE_ACCESS @@ -365,7 +365,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); #define DATA_SIZE 8 #include "exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX #undef SOFTMMU_CODE_ACCESS diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h index 470db20174..8217733a9f 100644 --- a/include/exec/softmmu_exec.h +++ b/include/exec/softmmu_exec.h @@ -22,7 +22,7 @@ /* The memory helpers for tcg-generated code need tcg_target_long etc. */ #include "tcg.h" -#define ACCESS_TYPE 0 +#define CPU_MMU_INDEX 0 #define MEMSUFFIX MMU_MODE0_SUFFIX #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -35,10 +35,10 @@ #define DATA_SIZE 8 #include "exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX -#define ACCESS_TYPE 1 +#define CPU_MMU_INDEX 1 #define MEMSUFFIX MMU_MODE1_SUFFIX #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -51,12 +51,12 @@ #define DATA_SIZE 8 #include "exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX #if (NB_MMU_MODES >= 3) -#define ACCESS_TYPE 2 +#define CPU_MMU_INDEX 2 #define MEMSUFFIX MMU_MODE2_SUFFIX #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -69,13 +69,13 @@ #define DATA_SIZE 8 #include "exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX #endif /* (NB_MMU_MODES >= 3) */ #if (NB_MMU_MODES >= 4) -#define ACCESS_TYPE 3 +#define CPU_MMU_INDEX 3 #define MEMSUFFIX MMU_MODE3_SUFFIX #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -88,13 +88,13 @@ #define DATA_SIZE 8 #include "exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX #endif /* (NB_MMU_MODES >= 4) */ #if (NB_MMU_MODES >= 5) -#define ACCESS_TYPE 4 +#define CPU_MMU_INDEX 4 #define MEMSUFFIX MMU_MODE4_SUFFIX #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -107,13 +107,13 @@ #define DATA_SIZE 8 #include "exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX #endif /* (NB_MMU_MODES >= 5) */ #if (NB_MMU_MODES >= 6) -#define ACCESS_TYPE 5 +#define CPU_MMU_INDEX 5 #define MEMSUFFIX MMU_MODE5_SUFFIX #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -126,7 +126,7 @@ #define DATA_SIZE 8 #include "exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX #endif /* (NB_MMU_MODES >= 6) */ @@ -135,7 +135,7 @@ #endif /* (NB_MMU_MODES > 6) */ /* these access are slower, they must be as rare as possible */ -#define ACCESS_TYPE (NB_MMU_MODES) +#define CPU_MMU_INDEX (cpu_mmu_index(env)) #define MEMSUFFIX _data #define DATA_SIZE 1 #include "exec/softmmu_header.h" @@ -148,7 +148,7 @@ #define DATA_SIZE 8 #include 
"exec/softmmu_header.h" -#undef ACCESS_TYPE +#undef CPU_MMU_INDEX #undef MEMSUFFIX #define ldub(p) ldub_data(p) diff --git a/include/exec/softmmu_header.h b/include/exec/softmmu_header.h index cc85a43685..bb18f97024 100644 --- a/include/exec/softmmu_header.h +++ b/include/exec/softmmu_header.h @@ -47,16 +47,6 @@ #error unsupported data size #endif -#if ACCESS_TYPE < (NB_MMU_MODES) -#define CPU_MMU_INDEX ACCESS_TYPE -#elif ACCESS_TYPE == (NB_MMU_MODES) -#define CPU_MMU_INDEX (cpu_mmu_index(env)) -#elif ACCESS_TYPE == (NB_MMU_MODES + 1) -#define CPU_MMU_INDEX (cpu_mmu_index(env)) -#else -#error invalid ACCESS_TYPE -#endif - #if DATA_SIZE == 8 #define RES_TYPE uint64_t #else @@ -199,6 +189,5 @@ static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env, #undef SUFFIX #undef USUFFIX #undef DATA_SIZE -#undef CPU_MMU_INDEX #undef MMUSUFFIX #undef ADDR_READ -- cgit v1.2.3 From 93e22326d62d903b301e90bea71f0dbd0de858d3 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 18:14:58 +0100 Subject: softmmu: make do_unaligned_access a method of CPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We will reference it from more files in the next patch. To avoid ruining the small steps we're making towards multi-target, make it a method of CPU rather than just a global. Reviewed-by: Andreas Färber Signed-off-by: Paolo Bonzini --- include/exec/softmmu_template.h | 30 ++++++++++++++++++------------ include/qom/cpu.h | 15 +++++++++++++-- 2 files changed, 31 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h index 73ed7cf921..12ead5a2b1 100644 --- a/include/exec/softmmu_template.h +++ b/include/exec/softmmu_template.h @@ -155,7 +155,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); } #endif tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr); @@ -186,7 +187,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, unsigned shift; do_unaligned_access: #ifdef ALIGNED_ONLY - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); #endif addr1 = addr & ~(DATA_SIZE - 1); addr2 = addr1 + DATA_SIZE; @@ -204,7 +206,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, /* Handle aligned access or unaligned access in the same page. 
*/ #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); } #endif @@ -237,7 +240,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); } #endif tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr); @@ -268,7 +272,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, unsigned shift; do_unaligned_access: #ifdef ALIGNED_ONLY - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); #endif addr1 = addr & ~(DATA_SIZE - 1); addr2 = addr1 + DATA_SIZE; @@ -286,7 +291,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, /* Handle aligned access or unaligned access in the same page. */ #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); } #endif @@ -357,7 +363,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); } #endif tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); @@ -386,7 +392,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, int i; do_unaligned_access: #ifdef ALIGNED_ONLY - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); #endif /* XXX: not efficient, but simple */ /* Note: relies on the fact that tlb_fill() does not remove the @@ -405,7 +411,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* Handle aligned access or unaligned access in the same page. */ #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); } #endif @@ -433,7 +439,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); } #endif tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); @@ -462,7 +468,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, int i; do_unaligned_access: #ifdef ALIGNED_ONLY - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); #endif /* XXX: not efficient, but simple */ /* Note: relies on the fact that tlb_fill() does not remove the @@ -481,7 +487,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* Handle aligned access or unaligned access in the same page. 
*/ #ifdef ALIGNED_ONLY if ((addr & (DATA_SIZE - 1)) != 0) { - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); + cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); } #endif diff --git a/include/qom/cpu.h b/include/qom/cpu.h index df977c88f0..4b352a28fa 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -80,6 +80,8 @@ struct TranslationBlock; * @has_work: Callback for checking if there is work to do. * @do_interrupt: Callback for interrupt handling. * @do_unassigned_access: Callback for unassigned access handling. + * @do_unaligned_access: Callback for unaligned access handling, if + * the target defines #ALIGNED_ONLY. * @memory_rw_debug: Callback for GDB memory access. * @dump_state: Callback for dumping state. * @dump_statistics: Callback for dumping statistics. @@ -112,6 +114,8 @@ typedef struct CPUClass { bool (*has_work)(CPUState *cpu); void (*do_interrupt)(CPUState *cpu); CPUUnassignedAccess do_unassigned_access; + void (*do_unaligned_access)(CPUState *cpu, vaddr addr, + int is_write, int is_user, uintptr_t retaddr); int (*memory_rw_debug)(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, @@ -544,8 +548,7 @@ void cpu_interrupt(CPUState *cpu, int mask); #endif /* USER_ONLY */ -#ifndef CONFIG_USER_ONLY - +#ifdef CONFIG_SOFTMMU static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr, bool is_write, bool is_exec, int opaque, unsigned size) @@ -557,6 +560,14 @@ static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr, } } +static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, + int is_write, int is_user, + uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr); +} #endif /** -- cgit v1.2.3 From 0f590e749f7c838bfd40b79242fc5aeb91e81747 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 17:55:24 +0100 Subject: softmmu: commonize helper definitions They do not need to be in op_helper.c. Because cputlb.c now includes softmmu_template.h twice for each size, io_readX must be elided the second time through. Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- include/exec/softmmu_template.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h index 12ead5a2b1..5a07f991a1 100644 --- a/include/exec/softmmu_template.h +++ b/include/exec/softmmu_template.h @@ -116,6 +116,7 @@ # define helper_te_st_name helper_le_st_name #endif +#ifndef SOFTMMU_CODE_ACCESS static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, hwaddr physaddr, target_ulong addr, @@ -135,6 +136,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, io_mem_read(mr, physaddr, &val, 1 << SHIFT); return val; } +#endif #ifdef SOFTMMU_CODE_ACCESS static __attribute__((unused)) -- cgit v1.2.3 From 58ed270df9764c90b5b8b750be5af2f82ceab0be Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 18:00:25 +0100 Subject: softmmu: move softmmu_template.h out of include/ It is only included in cputlb.c now. 
Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- include/exec/softmmu_template.h | 533 ---------------------------------------- 1 file changed, 533 deletions(-) delete mode 100644 include/exec/softmmu_template.h (limited to 'include') diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h deleted file mode 100644 index 5a07f991a1..0000000000 --- a/include/exec/softmmu_template.h +++ /dev/null @@ -1,533 +0,0 @@ -/* - * Software MMU support - * - * Generate helpers used by TCG for qemu_ld/st ops and code load - * functions. - * - * Included from target op helpers and exec.c. - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#include "qemu/timer.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" - -#define DATA_SIZE (1 << SHIFT) - -#if DATA_SIZE == 8 -#define SUFFIX q -#define LSUFFIX q -#define SDATA_TYPE int64_t -#define DATA_TYPE uint64_t -#elif DATA_SIZE == 4 -#define SUFFIX l -#define LSUFFIX l -#define SDATA_TYPE int32_t -#define DATA_TYPE uint32_t -#elif DATA_SIZE == 2 -#define SUFFIX w -#define LSUFFIX uw -#define SDATA_TYPE int16_t -#define DATA_TYPE uint16_t -#elif DATA_SIZE == 1 -#define SUFFIX b -#define LSUFFIX ub -#define SDATA_TYPE int8_t -#define DATA_TYPE uint8_t -#else -#error unsupported data size -#endif - - -/* For the benefit of TCG generated code, we want to avoid the complication - of ABI-specific return type promotion and always return a value extended - to the register size of the host. This is tcg_target_long, except in the - case of a 32-bit host and 64-bit data, and for that we always have - uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. 
*/ -#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8 -# define WORD_TYPE DATA_TYPE -# define USUFFIX SUFFIX -#else -# define WORD_TYPE tcg_target_ulong -# define USUFFIX glue(u, SUFFIX) -# define SSUFFIX glue(s, SUFFIX) -#endif - -#ifdef SOFTMMU_CODE_ACCESS -#define READ_ACCESS_TYPE 2 -#define ADDR_READ addr_code -#else -#define READ_ACCESS_TYPE 0 -#define ADDR_READ addr_read -#endif - -#if DATA_SIZE == 8 -# define BSWAP(X) bswap64(X) -#elif DATA_SIZE == 4 -# define BSWAP(X) bswap32(X) -#elif DATA_SIZE == 2 -# define BSWAP(X) bswap16(X) -#else -# define BSWAP(X) (X) -#endif - -#ifdef TARGET_WORDS_BIGENDIAN -# define TGT_BE(X) (X) -# define TGT_LE(X) BSWAP(X) -#else -# define TGT_BE(X) BSWAP(X) -# define TGT_LE(X) (X) -#endif - -#if DATA_SIZE == 1 -# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) -# define helper_be_ld_name helper_le_ld_name -# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX) -# define helper_be_lds_name helper_le_lds_name -# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX) -# define helper_be_st_name helper_le_st_name -#else -# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX) -# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX) -# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX) -# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX) -# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX) -# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX) -#endif - -#ifdef TARGET_WORDS_BIGENDIAN -# define helper_te_ld_name helper_be_ld_name -# define helper_te_st_name helper_be_st_name -#else -# define helper_te_ld_name helper_le_ld_name -# define helper_te_st_name helper_le_st_name -#endif - -#ifndef SOFTMMU_CODE_ACCESS -static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, - hwaddr physaddr, - target_ulong addr, - uintptr_t retaddr) -{ - uint64_t val; - CPUState *cpu = ENV_GET_CPU(env); - MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr); - - physaddr = (physaddr & TARGET_PAGE_MASK) + addr; - cpu->mem_io_pc = retaddr; - if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) { - cpu_io_recompile(cpu, retaddr); - } - - cpu->mem_io_vaddr = addr; - io_mem_read(mr, physaddr, &val, 1 << SHIFT); - return val; -} -#endif - -#ifdef SOFTMMU_CODE_ACCESS -static __attribute__((unused)) -#endif -WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, - uintptr_t retaddr) -{ - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - uintptr_t haddr; - DATA_TYPE res; - - /* Adjust the given return address. */ - retaddr -= GETPC_ADJ; - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } -#endif - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr); - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - ioaddr = env->iotlb[mmu_idx][index]; - - /* ??? Note that the io helpers always read data in the target - byte ordering. 
We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); - res = TGT_LE(res); - return res; - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - target_ulong addr1, addr2; - DATA_TYPE res1, res2; - unsigned shift; - do_unaligned_access: -#ifdef ALIGNED_ONLY - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); -#endif - addr1 = addr & ~(DATA_SIZE - 1); - addr2 = addr1 + DATA_SIZE; - /* Note the adjustment at the beginning of the function. - Undo that for the recursion. */ - res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ); - res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ); - shift = (addr & (DATA_SIZE - 1)) * 8; - - /* Little-endian combine. */ - res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift)); - return res; - } - - /* Handle aligned access or unaligned access in the same page. */ -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } -#endif - - haddr = addr + env->tlb_table[mmu_idx][index].addend; -#if DATA_SIZE == 1 - res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr); -#else - res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr); -#endif - return res; -} - -#if DATA_SIZE > 1 -#ifdef SOFTMMU_CODE_ACCESS -static __attribute__((unused)) -#endif -WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, - uintptr_t retaddr) -{ - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - uintptr_t haddr; - DATA_TYPE res; - - /* Adjust the given return address. */ - retaddr -= GETPC_ADJ; - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } -#endif - tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr); - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - ioaddr = env->iotlb[mmu_idx][index]; - - /* ??? Note that the io helpers always read data in the target - byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); - res = TGT_BE(res); - return res; - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - target_ulong addr1, addr2; - DATA_TYPE res1, res2; - unsigned shift; - do_unaligned_access: -#ifdef ALIGNED_ONLY - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); -#endif - addr1 = addr & ~(DATA_SIZE - 1); - addr2 = addr1 + DATA_SIZE; - /* Note the adjustment at the beginning of the function. - Undo that for the recursion. */ - res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ); - res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ); - shift = (addr & (DATA_SIZE - 1)) * 8; - - /* Big-endian combine. 
*/ - res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift)); - return res; - } - - /* Handle aligned access or unaligned access in the same page. */ -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, - mmu_idx, retaddr); - } -#endif - - haddr = addr + env->tlb_table[mmu_idx][index].addend; - res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr); - return res; -} -#endif /* DATA_SIZE > 1 */ - -DATA_TYPE -glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr, - int mmu_idx) -{ - return helper_te_ld_name (env, addr, mmu_idx, GETRA()); -} - -#ifndef SOFTMMU_CODE_ACCESS - -/* Provide signed versions of the load routines as well. We can of course - avoid this for 64-bit data, or for 32-bit data on 32-bit host. */ -#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS -WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr) -{ - return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr); -} - -# if DATA_SIZE > 1 -WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr) -{ - return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr); -} -# endif -#endif - -static inline void glue(io_write, SUFFIX)(CPUArchState *env, - hwaddr physaddr, - DATA_TYPE val, - target_ulong addr, - uintptr_t retaddr) -{ - CPUState *cpu = ENV_GET_CPU(env); - MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr); - - physaddr = (physaddr & TARGET_PAGE_MASK) + addr; - if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) { - cpu_io_recompile(cpu, retaddr); - } - - cpu->mem_io_vaddr = addr; - cpu->mem_io_pc = retaddr; - io_mem_write(mr, physaddr, val, 1 << SHIFT); -} - -void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, - int mmu_idx, uintptr_t retaddr) -{ - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - uintptr_t haddr; - - /* Adjust the given return address. */ - retaddr -= GETPC_ADJ; - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); - } -#endif - tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - ioaddr = env->iotlb[mmu_idx][index]; - - /* ??? Note that the io helpers always read data in the target - byte ordering. We should push the LE/BE request down into io. */ - val = TGT_LE(val); - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); - return; - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - int i; - do_unaligned_access: -#ifdef ALIGNED_ONLY - cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); -#endif - /* XXX: not efficient, but simple */ - /* Note: relies on the fact that tlb_fill() does not remove the - * previous page from the TLB cache. */ - for (i = DATA_SIZE - 1; i >= 0; i--) { - /* Little-endian extract. 
*/ - uint8_t val8 = val >> (i * 8); - /* Note the adjustment at the beginning of the function. - Undo that for the recursion. */ - glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, - mmu_idx, retaddr + GETPC_ADJ); - } - return; - } - - /* Handle aligned access or unaligned access in the same page. */ -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); - } -#endif - - haddr = addr + env->tlb_table[mmu_idx][index].addend; -#if DATA_SIZE == 1 - glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val); -#else - glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val); -#endif -} - -#if DATA_SIZE > 1 -void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, - int mmu_idx, uintptr_t retaddr) -{ - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - uintptr_t haddr; - - /* Adjust the given return address. */ - retaddr -= GETPC_ADJ; - - /* If the TLB entry is for a different page, reload and try again. */ - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); - } -#endif - tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - } - - /* Handle an IO access. */ - if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; - if ((addr & (DATA_SIZE - 1)) != 0) { - goto do_unaligned_access; - } - ioaddr = env->iotlb[mmu_idx][index]; - - /* ??? Note that the io helpers always read data in the target - byte ordering. We should push the LE/BE request down into io. */ - val = TGT_BE(val); - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); - return; - } - - /* Handle slow unaligned access (it spans two pages or IO). */ - if (DATA_SIZE > 1 - && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 - >= TARGET_PAGE_SIZE)) { - int i; - do_unaligned_access: -#ifdef ALIGNED_ONLY - cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); -#endif - /* XXX: not efficient, but simple */ - /* Note: relies on the fact that tlb_fill() does not remove the - * previous page from the TLB cache. */ - for (i = DATA_SIZE - 1; i >= 0; i--) { - /* Big-endian extract. */ - uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8)); - /* Note the adjustment at the beginning of the function. - Undo that for the recursion. */ - glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, - mmu_idx, retaddr + GETPC_ADJ); - } - return; - } - - /* Handle aligned access or unaligned access in the same page. 
*/ -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr); - } -#endif - - haddr = addr + env->tlb_table[mmu_idx][index].addend; - glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val); -} -#endif /* DATA_SIZE > 1 */ - -void -glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr, - DATA_TYPE val, int mmu_idx) -{ - helper_te_st_name(env, addr, val, mmu_idx, GETRA()); -} - -#endif /* !defined(SOFTMMU_CODE_ACCESS) */ - -#undef READ_ACCESS_TYPE -#undef SHIFT -#undef DATA_TYPE -#undef SUFFIX -#undef LSUFFIX -#undef DATA_SIZE -#undef ADDR_READ -#undef WORD_TYPE -#undef SDATA_TYPE -#undef USUFFIX -#undef SSUFFIX -#undef BSWAP -#undef TGT_BE -#undef TGT_LE -#undef CPU_BE -#undef CPU_LE -#undef helper_le_ld_name -#undef helper_be_ld_name -#undef helper_le_lds_name -#undef helper_be_lds_name -#undef helper_le_st_name -#undef helper_be_st_name -#undef helper_te_ld_name -#undef helper_te_st_name -- cgit v1.2.3 From f08b617018e424134a0a012b08253d567c62f7ee Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 19:42:10 +0100 Subject: softmmu: introduce cpu_ldst.h This will collect all load and store helpers soon. For now it is just a replacement for softmmu_exec.h, which this patch stops including directly, but we also include it where this will be necessary in order to simplify the next patch. Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- include/exec/cpu_ldst.h | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 include/exec/cpu_ldst.h (limited to 'include') diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h new file mode 100644 index 0000000000..a6b7884a67 --- /dev/null +++ b/include/exec/cpu_ldst.h @@ -0,0 +1,35 @@ +/* + * Software MMU support + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +/* + * Generate inline load/store functions for all MMU modes (typically + * at least _user and _kernel) as well as _data versions, for all data + * sizes. + * + * Used by target op helpers. + * + * MMU mode suffixes are defined in target cpu.h. + */ +#ifndef CPU_LDST_H +#define CPU_LDST_H + +#if !defined(CONFIG_USER_ONLY) +#include "exec/softmmu_exec.h" +#endif + +#endif /* CPU_LDST_H */ -- cgit v1.2.3 From c773828aa9259664bc24272b0cab781245409e1f Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 28 Mar 2014 19:11:26 +0100 Subject: softmmu: move all load/store functions to cpu_ldst.h Unify pieces of cpu-all.h, exec-all.h, softmmu_exec.h and tcg/tcg.h into a single new header file with all helpers. 
Reviewed-by: Richard Henderson Signed-off-by: Paolo Bonzini --- include/exec/cpu-all.h | 119 ------------- include/exec/cpu_ldst.h | 369 ++++++++++++++++++++++++++++++++++++++- include/exec/cpu_ldst_template.h | 193 ++++++++++++++++++++ include/exec/exec-all.h | 25 --- include/exec/softmmu_exec.h | 216 ----------------------- include/exec/softmmu_header.h | 193 -------------------- 6 files changed, 560 insertions(+), 555 deletions(-) create mode 100644 include/exec/cpu_ldst_template.h delete mode 100644 include/exec/softmmu_exec.h delete mode 100644 include/exec/softmmu_header.h (limited to 'include') diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 9cab592dc5..e8363d7248 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -198,127 +198,8 @@ extern unsigned long reserved_va; #define RESERVED_VA 0ul #endif -/* All direct uses of g2h and h2g need to go away for usermode softmmu. */ -#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE)) - -#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS -#define h2g_valid(x) 1 -#else -#define h2g_valid(x) ({ \ - unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ - (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \ - (!RESERVED_VA || (__guest < RESERVED_VA)); \ -}) #endif -#define h2g_nocheck(x) ({ \ - unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ - (abi_ulong)__ret; \ -}) - -#define h2g(x) ({ \ - /* Check if given address fits target address space */ \ - assert(h2g_valid(x)); \ - h2g_nocheck(x); \ -}) - -#define saddr(x) g2h(x) -#define laddr(x) g2h(x) - -#else /* !CONFIG_USER_ONLY */ -/* NOTE: we use double casts if pointers and target_ulong have - different sizes */ -#define saddr(x) (uint8_t *)(intptr_t)(x) -#define laddr(x) (uint8_t *)(intptr_t)(x) -#endif - -#define ldub_raw(p) ldub_p(laddr((p))) -#define ldsb_raw(p) ldsb_p(laddr((p))) -#define lduw_raw(p) lduw_p(laddr((p))) -#define ldsw_raw(p) ldsw_p(laddr((p))) -#define ldl_raw(p) ldl_p(laddr((p))) -#define ldq_raw(p) ldq_p(laddr((p))) -#define ldfl_raw(p) ldfl_p(laddr((p))) -#define ldfq_raw(p) ldfq_p(laddr((p))) -#define stb_raw(p, v) stb_p(saddr((p)), v) -#define stw_raw(p, v) stw_p(saddr((p)), v) -#define stl_raw(p, v) stl_p(saddr((p)), v) -#define stq_raw(p, v) stq_p(saddr((p)), v) -#define stfl_raw(p, v) stfl_p(saddr((p)), v) -#define stfq_raw(p, v) stfq_p(saddr((p)), v) - - -#if defined(CONFIG_USER_ONLY) - -/* if user mode, no other memory access functions */ -#define ldub(p) ldub_raw(p) -#define ldsb(p) ldsb_raw(p) -#define lduw(p) lduw_raw(p) -#define ldsw(p) ldsw_raw(p) -#define ldl(p) ldl_raw(p) -#define ldq(p) ldq_raw(p) -#define ldfl(p) ldfl_raw(p) -#define ldfq(p) ldfq_raw(p) -#define stb(p, v) stb_raw(p, v) -#define stw(p, v) stw_raw(p, v) -#define stl(p, v) stl_raw(p, v) -#define stq(p, v) stq_raw(p, v) -#define stfl(p, v) stfl_raw(p, v) -#define stfq(p, v) stfq_raw(p, v) - -#define cpu_ldub_code(env1, p) ldub_raw(p) -#define cpu_ldsb_code(env1, p) ldsb_raw(p) -#define cpu_lduw_code(env1, p) lduw_raw(p) -#define cpu_ldsw_code(env1, p) ldsw_raw(p) -#define cpu_ldl_code(env1, p) ldl_raw(p) -#define cpu_ldq_code(env1, p) ldq_raw(p) - -#define cpu_ldub_data(env, addr) ldub_raw(addr) -#define cpu_lduw_data(env, addr) lduw_raw(addr) -#define cpu_ldsw_data(env, addr) ldsw_raw(addr) -#define cpu_ldl_data(env, addr) ldl_raw(addr) -#define cpu_ldq_data(env, addr) ldq_raw(addr) - -#define cpu_stb_data(env, addr, data) stb_raw(addr, data) -#define cpu_stw_data(env, addr, data) stw_raw(addr, data) -#define 
cpu_stl_data(env, addr, data) stl_raw(addr, data) -#define cpu_stq_data(env, addr, data) stq_raw(addr, data) - -#define cpu_ldub_kernel(env, addr) ldub_raw(addr) -#define cpu_lduw_kernel(env, addr) lduw_raw(addr) -#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr) -#define cpu_ldl_kernel(env, addr) ldl_raw(addr) -#define cpu_ldq_kernel(env, addr) ldq_raw(addr) - -#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data) -#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data) -#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data) -#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data) - -#define ldub_kernel(p) ldub_raw(p) -#define ldsb_kernel(p) ldsb_raw(p) -#define lduw_kernel(p) lduw_raw(p) -#define ldsw_kernel(p) ldsw_raw(p) -#define ldl_kernel(p) ldl_raw(p) -#define ldq_kernel(p) ldq_raw(p) -#define ldfl_kernel(p) ldfl_raw(p) -#define ldfq_kernel(p) ldfq_raw(p) -#define stb_kernel(p, v) stb_raw(p, v) -#define stw_kernel(p, v) stw_raw(p, v) -#define stl_kernel(p, v) stl_raw(p, v) -#define stq_kernel(p, v) stq_raw(p, v) -#define stfl_kernel(p, v) stfl_raw(p, v) -#define stfq_kernel(p, vt) stfq_raw(p, v) - -#define cpu_ldub_data(env, addr) ldub_raw(addr) -#define cpu_lduw_data(env, addr) lduw_raw(addr) -#define cpu_ldl_data(env, addr) ldl_raw(addr) - -#define cpu_stb_data(env, addr, data) stb_raw(addr, data) -#define cpu_stw_data(env, addr, data) stw_raw(addr, data) -#define cpu_stl_data(env, addr, data) stl_raw(addr, data) -#endif /* defined(CONFIG_USER_ONLY) */ - /* page related stuff */ #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h index a6b7884a67..e5550e7175 100644 --- a/include/exec/cpu_ldst.h +++ b/include/exec/cpu_ldst.h @@ -28,8 +28,373 @@ #ifndef CPU_LDST_H #define CPU_LDST_H -#if !defined(CONFIG_USER_ONLY) -#include "exec/softmmu_exec.h" +#if defined(CONFIG_USER_ONLY) +/* All direct uses of g2h and h2g need to go away for usermode softmmu. 
*/ +#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE)) + +#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS +#define h2g_valid(x) 1 +#else +#define h2g_valid(x) ({ \ + unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ + (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \ + (!RESERVED_VA || (__guest < RESERVED_VA)); \ +}) +#endif + +#define h2g_nocheck(x) ({ \ + unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ + (abi_ulong)__ret; \ +}) + +#define h2g(x) ({ \ + /* Check if given address fits target address space */ \ + assert(h2g_valid(x)); \ + h2g_nocheck(x); \ +}) + +#define saddr(x) g2h(x) +#define laddr(x) g2h(x) + +#else /* !CONFIG_USER_ONLY */ +/* NOTE: we use double casts if pointers and target_ulong have + different sizes */ +#define saddr(x) (uint8_t *)(intptr_t)(x) +#define laddr(x) (uint8_t *)(intptr_t)(x) #endif +#define ldub_raw(p) ldub_p(laddr((p))) +#define ldsb_raw(p) ldsb_p(laddr((p))) +#define lduw_raw(p) lduw_p(laddr((p))) +#define ldsw_raw(p) ldsw_p(laddr((p))) +#define ldl_raw(p) ldl_p(laddr((p))) +#define ldq_raw(p) ldq_p(laddr((p))) +#define ldfl_raw(p) ldfl_p(laddr((p))) +#define ldfq_raw(p) ldfq_p(laddr((p))) +#define stb_raw(p, v) stb_p(saddr((p)), v) +#define stw_raw(p, v) stw_p(saddr((p)), v) +#define stl_raw(p, v) stl_p(saddr((p)), v) +#define stq_raw(p, v) stq_p(saddr((p)), v) +#define stfl_raw(p, v) stfl_p(saddr((p)), v) +#define stfq_raw(p, v) stfq_p(saddr((p)), v) + + +#if defined(CONFIG_USER_ONLY) + +/* if user mode, no other memory access functions */ +#define ldub(p) ldub_raw(p) +#define ldsb(p) ldsb_raw(p) +#define lduw(p) lduw_raw(p) +#define ldsw(p) ldsw_raw(p) +#define ldl(p) ldl_raw(p) +#define ldq(p) ldq_raw(p) +#define ldfl(p) ldfl_raw(p) +#define ldfq(p) ldfq_raw(p) +#define stb(p, v) stb_raw(p, v) +#define stw(p, v) stw_raw(p, v) +#define stl(p, v) stl_raw(p, v) +#define stq(p, v) stq_raw(p, v) +#define stfl(p, v) stfl_raw(p, v) +#define stfq(p, v) stfq_raw(p, v) + +#define cpu_ldub_code(env1, p) ldub_raw(p) +#define cpu_ldsb_code(env1, p) ldsb_raw(p) +#define cpu_lduw_code(env1, p) lduw_raw(p) +#define cpu_ldsw_code(env1, p) ldsw_raw(p) +#define cpu_ldl_code(env1, p) ldl_raw(p) +#define cpu_ldq_code(env1, p) ldq_raw(p) + +#define cpu_ldub_data(env, addr) ldub_raw(addr) +#define cpu_lduw_data(env, addr) lduw_raw(addr) +#define cpu_ldsw_data(env, addr) ldsw_raw(addr) +#define cpu_ldl_data(env, addr) ldl_raw(addr) +#define cpu_ldq_data(env, addr) ldq_raw(addr) + +#define cpu_stb_data(env, addr, data) stb_raw(addr, data) +#define cpu_stw_data(env, addr, data) stw_raw(addr, data) +#define cpu_stl_data(env, addr, data) stl_raw(addr, data) +#define cpu_stq_data(env, addr, data) stq_raw(addr, data) + +#define cpu_ldub_kernel(env, addr) ldub_raw(addr) +#define cpu_lduw_kernel(env, addr) lduw_raw(addr) +#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr) +#define cpu_ldl_kernel(env, addr) ldl_raw(addr) +#define cpu_ldq_kernel(env, addr) ldq_raw(addr) + +#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data) +#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data) +#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data) +#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data) + +#define ldub_kernel(p) ldub_raw(p) +#define ldsb_kernel(p) ldsb_raw(p) +#define lduw_kernel(p) lduw_raw(p) +#define ldsw_kernel(p) ldsw_raw(p) +#define ldl_kernel(p) ldl_raw(p) +#define ldq_kernel(p) ldq_raw(p) +#define ldfl_kernel(p) ldfl_raw(p) +#define ldfq_kernel(p) ldfq_raw(p) +#define stb_kernel(p, v) stb_raw(p, v) 
+#define stw_kernel(p, v) stw_raw(p, v) +#define stl_kernel(p, v) stl_raw(p, v) +#define stq_kernel(p, v) stq_raw(p, v) +#define stfl_kernel(p, v) stfl_raw(p, v) +#define stfq_kernel(p, vt) stfq_raw(p, v) + +#define cpu_ldub_data(env, addr) ldub_raw(addr) +#define cpu_lduw_data(env, addr) lduw_raw(addr) +#define cpu_ldl_data(env, addr) ldl_raw(addr) + +#define cpu_stb_data(env, addr, data) stb_raw(addr, data) +#define cpu_stw_data(env, addr, data) stw_raw(addr, data) +#define cpu_stl_data(env, addr, data) stl_raw(addr, data) + +#else + +/* XXX: find something cleaner. + * Furthermore, this is false for 64 bits targets + */ +#define ldul_user ldl_user +#define ldul_kernel ldl_kernel +#define ldul_hypv ldl_hypv +#define ldul_executive ldl_executive +#define ldul_supervisor ldl_supervisor + +/* The memory helpers for tcg-generated code need tcg_target_long etc. */ +#include "tcg.h" + +uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); + +void helper_stb_mmu(CPUArchState *env, target_ulong addr, + uint8_t val, int mmu_idx); +void helper_stw_mmu(CPUArchState *env, target_ulong addr, + uint16_t val, int mmu_idx); +void helper_stl_mmu(CPUArchState *env, target_ulong addr, + uint32_t val, int mmu_idx); +void helper_stq_mmu(CPUArchState *env, target_ulong addr, + uint64_t val, int mmu_idx); + +uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); + +#define CPU_MMU_INDEX 0 +#define MEMSUFFIX MMU_MODE0_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX + +#define CPU_MMU_INDEX 1 +#define MEMSUFFIX MMU_MODE1_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX + +#if (NB_MMU_MODES >= 3) + +#define CPU_MMU_INDEX 2 +#define MEMSUFFIX MMU_MODE2_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 3) */ + +#if (NB_MMU_MODES >= 4) + +#define CPU_MMU_INDEX 3 +#define MEMSUFFIX MMU_MODE3_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 4) */ + +#if (NB_MMU_MODES >= 5) + +#define CPU_MMU_INDEX 4 +#define MEMSUFFIX MMU_MODE4_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include 
"exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 5) */ + +#if (NB_MMU_MODES >= 6) + +#define CPU_MMU_INDEX 5 +#define MEMSUFFIX MMU_MODE5_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 6) */ + +#if (NB_MMU_MODES > 6) +#error "NB_MMU_MODES > 6 is not supported for now" +#endif /* (NB_MMU_MODES > 6) */ + +/* these access are slower, they must be as rare as possible */ +#define CPU_MMU_INDEX (cpu_mmu_index(env)) +#define MEMSUFFIX _data +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX + +#define ldub(p) ldub_data(p) +#define ldsb(p) ldsb_data(p) +#define lduw(p) lduw_data(p) +#define ldsw(p) ldsw_data(p) +#define ldl(p) ldl_data(p) +#define ldq(p) ldq_data(p) + +#define stb(p, v) stb_data(p, v) +#define stw(p, v) stw_data(p, v) +#define stl(p, v) stl_data(p, v) +#define stq(p, v) stq_data(p, v) + +#define CPU_MMU_INDEX (cpu_mmu_index(env)) +#define MEMSUFFIX _code +#define SOFTMMU_CODE_ACCESS + +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" + +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#undef SOFTMMU_CODE_ACCESS + +/** + * tlb_vaddr_to_host: + * @env: CPUArchState + * @addr: guest virtual address to look up + * @access_type: 0 for read, 1 for write, 2 for execute + * @mmu_idx: MMU index to use for lookup + * + * Look up the specified guest virtual index in the TCG softmmu TLB. + * If the TLB contains a host virtual address suitable for direct RAM + * access, then return it. Otherwise (TLB miss, TLB entry is for an + * I/O access, etc) return NULL. + * + * This is the equivalent of the initial fast-path code used by + * TCG backends for guest load and store accesses. 
+ */ +static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr, + int access_type, int mmu_idx) +{ + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index]; + target_ulong tlb_addr; + uintptr_t haddr; + + switch (access_type) { + case 0: + tlb_addr = tlbentry->addr_read; + break; + case 1: + tlb_addr = tlbentry->addr_write; + break; + case 2: + tlb_addr = tlbentry->addr_code; + break; + default: + g_assert_not_reached(); + } + + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + /* TLB entry is for a different page */ + return NULL; + } + + if (tlb_addr & ~TARGET_PAGE_MASK) { + /* IO access */ + return NULL; + } + + haddr = addr + env->tlb_table[mmu_idx][index].addend; + return (void *)haddr; +} + +#endif /* defined(CONFIG_USER_ONLY) */ + #endif /* CPU_LDST_H */ diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h new file mode 100644 index 0000000000..006093ac49 --- /dev/null +++ b/include/exec/cpu_ldst_template.h @@ -0,0 +1,193 @@ +/* + * Software MMU support + * + * Generate inline load/store functions for one MMU mode and data + * size. + * + * Generate a store function as well as signed and unsigned loads. For + * 32 and 64 bit cases, also generate floating point functions with + * the same size. + * + * Not used directly but included from cpu_ldst.h. + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#if DATA_SIZE == 8 +#define SUFFIX q +#define USUFFIX q +#define DATA_TYPE uint64_t +#elif DATA_SIZE == 4 +#define SUFFIX l +#define USUFFIX l +#define DATA_TYPE uint32_t +#elif DATA_SIZE == 2 +#define SUFFIX w +#define USUFFIX uw +#define DATA_TYPE uint16_t +#define DATA_STYPE int16_t +#elif DATA_SIZE == 1 +#define SUFFIX b +#define USUFFIX ub +#define DATA_TYPE uint8_t +#define DATA_STYPE int8_t +#else +#error unsupported data size +#endif + +#if DATA_SIZE == 8 +#define RES_TYPE uint64_t +#else +#define RES_TYPE uint32_t +#endif + +#ifdef SOFTMMU_CODE_ACCESS +#define ADDR_READ addr_code +#define MMUSUFFIX _cmmu +#else +#define ADDR_READ addr_read +#define MMUSUFFIX _mmu +#endif + +/* generic load/store macros */ + +static inline RES_TYPE +glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) +{ + int page_index; + RES_TYPE res; + target_ulong addr; + int mmu_idx; + + addr = ptr; + page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = CPU_MMU_INDEX; + if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != + (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { + res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx); + } else { + uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; + res = glue(glue(ld, USUFFIX), _raw)(hostaddr); + } + return res; +} + +#if DATA_SIZE <= 2 +static inline int +glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) +{ + int res, page_index; + target_ulong addr; + int mmu_idx; + + addr = ptr; + page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = CPU_MMU_INDEX; + if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != + (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { + res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX), + MMUSUFFIX)(env, addr, mmu_idx); + } else { + uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; + res = glue(glue(lds, SUFFIX), _raw)(hostaddr); + } + return res; +} +#endif + +#ifndef SOFTMMU_CODE_ACCESS + +/* generic store macro */ + +static inline void +glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr, + RES_TYPE v) +{ + int page_index; + target_ulong addr; + int mmu_idx; + + addr = ptr; + page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = CPU_MMU_INDEX; + if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write != + (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { + glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx); + } else { + uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; + glue(glue(st, SUFFIX), _raw)(hostaddr, v); + } +} + + + +#if DATA_SIZE == 8 +static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr) +{ + union { + float64 d; + uint64_t i; + } u; + u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr); + return u.d; +} + +static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr, float64 v) +{ + union { + float64 d; + uint64_t i; + } u; + u.d = v; + glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i); +} +#endif /* DATA_SIZE == 8 */ + +#if DATA_SIZE == 4 +static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr) +{ + union { + float32 f; + uint32_t i; + } u; + u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr); + return u.f; +} + +static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr, float32 v) +{ + union { + float32 f; + uint32_t i; + } u; + u.f = v; + glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i); +} +#endif /* DATA_SIZE 
== 4 */ + +#endif /* !SOFTMMU_CODE_ACCESS */ + +#undef RES_TYPE +#undef DATA_TYPE +#undef DATA_STYPE +#undef SUFFIX +#undef USUFFIX +#undef DATA_SIZE +#undef MMUSUFFIX +#undef ADDR_READ diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index ef72027dd8..3d62d9c464 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -344,31 +344,6 @@ bool io_mem_write(struct MemoryRegion *mr, hwaddr addr, void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx, uintptr_t retaddr); -uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); - -#define CPU_MMU_INDEX (cpu_mmu_index(env)) -#define MEMSUFFIX _code -#define SOFTMMU_CODE_ACCESS - -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" - -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#undef SOFTMMU_CODE_ACCESS - #endif #if defined(CONFIG_USER_ONLY) diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h deleted file mode 100644 index 8217733a9f..0000000000 --- a/include/exec/softmmu_exec.h +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Software MMU support - * - * Generate inline load/store functions for all MMU modes (typically - * at least _user and _kernel) as well as _data versions, for all data - * sizes. - * - * Used by target op helpers. - * - * MMU mode suffixes are defined in target cpu.h. - */ - -/* XXX: find something cleaner. - * Furthermore, this is false for 64 bits targets - */ -#define ldul_user ldl_user -#define ldul_kernel ldl_kernel -#define ldul_hypv ldl_hypv -#define ldul_executive ldl_executive -#define ldul_supervisor ldl_supervisor - -/* The memory helpers for tcg-generated code need tcg_target_long etc. 
*/ -#include "tcg.h" - -#define CPU_MMU_INDEX 0 -#define MEMSUFFIX MMU_MODE0_SUFFIX -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX - -#define CPU_MMU_INDEX 1 -#define MEMSUFFIX MMU_MODE1_SUFFIX -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX - -#if (NB_MMU_MODES >= 3) - -#define CPU_MMU_INDEX 2 -#define MEMSUFFIX MMU_MODE2_SUFFIX -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 3) */ - -#if (NB_MMU_MODES >= 4) - -#define CPU_MMU_INDEX 3 -#define MEMSUFFIX MMU_MODE3_SUFFIX -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 4) */ - -#if (NB_MMU_MODES >= 5) - -#define CPU_MMU_INDEX 4 -#define MEMSUFFIX MMU_MODE4_SUFFIX -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 5) */ - -#if (NB_MMU_MODES >= 6) - -#define CPU_MMU_INDEX 5 -#define MEMSUFFIX MMU_MODE5_SUFFIX -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 6) */ - -#if (NB_MMU_MODES > 6) -#error "NB_MMU_MODES > 6 is not supported for now" -#endif /* (NB_MMU_MODES > 6) */ - -/* these access are slower, they must be as rare as possible */ -#define CPU_MMU_INDEX (cpu_mmu_index(env)) -#define MEMSUFFIX _data -#define DATA_SIZE 1 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 2 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 4 -#include "exec/softmmu_header.h" - -#define DATA_SIZE 8 -#include "exec/softmmu_header.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX - -#define ldub(p) ldub_data(p) -#define ldsb(p) ldsb_data(p) -#define lduw(p) lduw_data(p) -#define ldsw(p) ldsw_data(p) -#define ldl(p) ldl_data(p) -#define ldq(p) ldq_data(p) - -#define stb(p, v) stb_data(p, v) -#define stw(p, v) stw_data(p, v) -#define stl(p, v) stl_data(p, v) -#define stq(p, v) stq_data(p, v) - -/** - * tlb_vaddr_to_host: - * @env: CPUArchState - * @addr: guest virtual address to look up - * @access_type: 0 for read, 1 for write, 2 for execute - * @mmu_idx: MMU index to use for lookup - * - * Look up the specified guest virtual index in the TCG softmmu TLB. - * If the TLB contains a host virtual address suitable for direct RAM - * access, then return it. Otherwise (TLB miss, TLB entry is for an - * I/O access, etc) return NULL. 
- *
- * This is the equivalent of the initial fast-path code used by
- * TCG backends for guest load and store accesses.
- */
-static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
-                                      int access_type, int mmu_idx)
-{
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
-    target_ulong tlb_addr;
-    uintptr_t haddr;
-
-    switch (access_type) {
-    case 0:
-        tlb_addr = tlbentry->addr_read;
-        break;
-    case 1:
-        tlb_addr = tlbentry->addr_write;
-        break;
-    case 2:
-        tlb_addr = tlbentry->addr_code;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        /* TLB entry is for a different page */
-        return NULL;
-    }
-
-    if (tlb_addr & ~TARGET_PAGE_MASK) {
-        /* IO access */
-        return NULL;
-    }
-
-    haddr = addr + env->tlb_table[mmu_idx][index].addend;
-    return (void *)haddr;
-}
diff --git a/include/exec/softmmu_header.h b/include/exec/softmmu_header.h
deleted file mode 100644
index bb18f97024..0000000000
--- a/include/exec/softmmu_header.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Software MMU support
- *
- * Generate inline load/store functions for one MMU mode and data
- * size.
- *
- * Generate a store function as well as signed and unsigned loads. For
- * 32 and 64 bit cases, also generate floating point functions with
- * the same size.
- *
- * Not used directly but included from softmmu_exec.h and exec-all.h.
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */ -#if DATA_SIZE == 8 -#define SUFFIX q -#define USUFFIX q -#define DATA_TYPE uint64_t -#elif DATA_SIZE == 4 -#define SUFFIX l -#define USUFFIX l -#define DATA_TYPE uint32_t -#elif DATA_SIZE == 2 -#define SUFFIX w -#define USUFFIX uw -#define DATA_TYPE uint16_t -#define DATA_STYPE int16_t -#elif DATA_SIZE == 1 -#define SUFFIX b -#define USUFFIX ub -#define DATA_TYPE uint8_t -#define DATA_STYPE int8_t -#else -#error unsupported data size -#endif - -#if DATA_SIZE == 8 -#define RES_TYPE uint64_t -#else -#define RES_TYPE uint32_t -#endif - -#ifdef SOFTMMU_CODE_ACCESS -#define ADDR_READ addr_code -#define MMUSUFFIX _cmmu -#else -#define ADDR_READ addr_read -#define MMUSUFFIX _mmu -#endif - -/* generic load/store macros */ - -static inline RES_TYPE -glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) -{ - int page_index; - RES_TYPE res; - target_ulong addr; - int mmu_idx; - - addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != - (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { - res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx); - } else { - uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; - res = glue(glue(ld, USUFFIX), _raw)(hostaddr); - } - return res; -} - -#if DATA_SIZE <= 2 -static inline int -glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) -{ - int res, page_index; - target_ulong addr; - int mmu_idx; - - addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != - (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { - res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX), - MMUSUFFIX)(env, addr, mmu_idx); - } else { - uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; - res = glue(glue(lds, SUFFIX), _raw)(hostaddr); - } - return res; -} -#endif - -#ifndef SOFTMMU_CODE_ACCESS - -/* generic store macro */ - -static inline void -glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr, - RES_TYPE v) -{ - int page_index; - target_ulong addr; - int mmu_idx; - - addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write != - (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { - glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx); - } else { - uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; - glue(glue(st, SUFFIX), _raw)(hostaddr, v); - } -} - - - -#if DATA_SIZE == 8 -static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr) -{ - union { - float64 d; - uint64_t i; - } u; - u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr); - return u.d; -} - -static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr, float64 v) -{ - union { - float64 d; - uint64_t i; - } u; - u.d = v; - glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i); -} -#endif /* DATA_SIZE == 8 */ - -#if DATA_SIZE == 4 -static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr) -{ - union { - float32 f; - uint32_t i; - } u; - u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr); - return u.f; -} - -static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr, float32 v) -{ - union { - float32 f; - uint32_t i; - } u; - u.f = v; - glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i); -} -#endif /* DATA_SIZE 
== 4 */ - -#endif /* !SOFTMMU_CODE_ACCESS */ - -#undef RES_TYPE -#undef DATA_TYPE -#undef DATA_STYPE -#undef SUFFIX -#undef USUFFIX -#undef DATA_SIZE -#undef MMUSUFFIX -#undef ADDR_READ -- cgit v1.2.3
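A note on how the rewritten template is meant to be read: every inclusion of
"exec/softmmu_header.h" now stamps out one family of inline accessors from the
values of CPU_MMU_INDEX, MEMSUFFIX and DATA_SIZE defined at the inclusion site,
instead of deriving them from ACCESS_TYPE. As a worked illustration only --
hand-expanded from the template text above, and assuming a target that defines
MMU_MODE1_SUFFIX as _kernel, as x86 does -- the instantiation with DATA_SIZE 4
and CPU_MMU_INDEX 1 resolves SUFFIX and USUFFIX to l and MMUSUFFIX to _mmu, so
glue() pastes the tokens into roughly:

/* Hypothetical hand expansion; not a literal QEMU source excerpt. */
static inline uint32_t cpu_ldl_kernel(CPUArchState *env, target_ulong ptr)
{
    int page_index;
    uint32_t res;
    target_ulong addr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = 1;                           /* CPU_MMU_INDEX, now a constant */
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_read !=
                 (addr & (TARGET_PAGE_MASK | 3)))) {
        /* TLB miss, I/O page or unaligned access: out-of-line helper */
        res = helper_ldl_mmu(env, addr, mmu_idx);
    } else {
        /* TLB hit on RAM: the cached addend maps guest to host address */
        uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = ldl_raw(hostaddr);
    }
    return res;
}

The SOFTMMU_CODE_ACCESS instantiations differ only in comparing against
addr_code instead of addr_read and calling the _cmmu helpers, with the store
and floating-point accessors compiled out entirely.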