author     Anthony Liguori <aliguori@amazon.com>  2013-10-11 09:36:52 -0700
committer  Anthony Liguori <aliguori@amazon.com>  2013-10-11 09:36:52 -0700
commit     ab1eb72b1db1740093d52207887a2cfc8665bad6 (patch)
tree       4f90bcf16b0d52050d1a1dd0a2892b7c1a5ee61d
parent     a3400aeede46c6c30b6fefb20fc90a43f1f6e7b2 (diff)
parent     867b3201a333e35a91bea9febc66cce689a765c4 (diff)
Merge remote-tracking branch 'rth/tcg-pull' into staging
# By Richard Henderson
# Via Richard Henderson
* rth/tcg-pull:
  exec: Add both big- and little-endian memory helpers
  tcg: Add qemu_ld_st_i32/64
  tcg: Add TCGMemOp
  configure: Remove CONFIG_QEMU_LDST_OPTIMIZATION
  tcg: Add tcg-be-ldst.h
  tcg: Add tcg-be-null.h
  exec: Delete is_tcg_gen_code and GETRA_EXT
  tcg-aarch64: Update to helper_ret_*_mmu routines
  tcg: Merge tcg_register_helper into tcg_context_init
  tcg: Add tcg-runtime.c helpers to all_helpers
  tcg: Put target helper data into an array.
  tcg: Remove stray semi-colons from target-*/helper.h
  tcg: Move helper registration into tcg_context_init
  target-m68k: Rename helpers.h to helper.h
  tcg: Use a GHashTable for tcg_find_helper
  tcg: Delete tcg_helper_get_name declaration
  tcg-hppa: Remove tcg backend

Message-id: 1381440525-6666-1-git-send-email-rth@twiddle.net
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
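The series pivots on the new TCGMemOp operand type, which packs the size, signedness, and endianness of a guest memory access into one value. The following is a minimal sketch of its shape; the names and values are recalled from the "tcg: Add TCGMemOp" patch rather than quoted from this page, so treat tcg/tcg.h in the tree as authoritative.

/* Sketch (values assumed, not quoted from this diff). */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,       /* mask for the size field above */

    MO_SIGN  = 4,       /* sign-extend the loaded value */

    MO_BSWAP = 8,       /* swap to/from host endianness */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,   /* "target endian" */
#else
    MO_TE    = MO_LE,
#endif

    /* Common combinations, e.g. a target-endian signed 16-bit load: */
    MO_TESW  = MO_TE | MO_SIGN | MO_16,
} TCGMemOp;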
-rw-r--r--  MAINTAINERS                      |    5
-rwxr-xr-x  configure                        |   11
-rw-r--r--  include/exec/def-helper.h        |    3
-rw-r--r--  include/exec/exec-all.h          |   30
-rw-r--r--  include/exec/softmmu_template.h  |  286
-rw-r--r--  target-alpha/helper.h            |    2
-rw-r--r--  target-alpha/translate.c         |    4
-rw-r--r--  target-arm/helper.h              |    8
-rw-r--r--  target-arm/translate.c           |    3
-rw-r--r--  target-cris/helper.h             |    8
-rw-r--r--  target-cris/translate.c          |    3
-rw-r--r--  target-i386/translate.c          |    4
-rw-r--r--  target-m68k/helper.c             |    2
-rw-r--r--  target-m68k/helper.h (renamed from target-m68k/helpers.h) | 0
-rw-r--r--  target-m68k/op_helper.c          |    2
-rw-r--r--  target-m68k/translate.c          |    7
-rw-r--r--  target-microblaze/translate.c    |    2
-rw-r--r--  target-mips/helper.h             |   12
-rw-r--r--  target-mips/translate.c          |    4
-rw-r--r--  target-openrisc/translate.c      |    2
-rw-r--r--  target-ppc/helper.h              |   10
-rw-r--r--  target-ppc/translate.c           |    4
-rw-r--r--  target-s390x/translate.c         |    4
-rw-r--r--  target-sh4/translate.c           |    4
-rw-r--r--  target-sparc/helper.h            |   18
-rw-r--r--  target-sparc/translate.c         |    5
-rw-r--r--  target-unicore32/translate.c     |    3
-rw-r--r--  target-xtensa/translate.c        |    2
-rw-r--r--  tcg/README                       |   43
-rw-r--r--  tcg/aarch64/tcg-target.c         |   51
-rw-r--r--  tcg/aarch64/tcg-target.h         |    2
-rw-r--r--  tcg/arm/tcg-target.c             |   27
-rw-r--r--  tcg/arm/tcg-target.h             |    2
-rw-r--r--  tcg/hppa/tcg-target.c            | 1831
-rw-r--r--  tcg/hppa/tcg-target.h            |  123
-rw-r--r--  tcg/i386/tcg-target.c            |   30
-rw-r--r--  tcg/i386/tcg-target.h            |    2
-rw-r--r--  tcg/ia64/tcg-target.c            |    2
-rw-r--r--  tcg/ia64/tcg-target.h            |    2
-rw-r--r--  tcg/mips/tcg-target.c            |    2
-rw-r--r--  tcg/mips/tcg-target.h            |    2
-rw-r--r--  tcg/ppc/tcg-target.c             |   28
-rw-r--r--  tcg/ppc/tcg-target.h             |    2
-rw-r--r--  tcg/ppc64/tcg-target.c           |   26
-rw-r--r--  tcg/ppc64/tcg-target.h           |    2
-rw-r--r--  tcg/s390/tcg-target.c            |    2
-rw-r--r--  tcg/s390/tcg-target.h            |    2
-rw-r--r--  tcg/sparc/tcg-target.c           |    2
-rw-r--r--  tcg/sparc/tcg-target.h           |    2
-rw-r--r--  tcg/tcg-be-ldst.h                |   90
-rw-r--r--  tcg/tcg-be-null.h                |   43
-rw-r--r--  tcg/tcg-op.h                     |  239
-rw-r--r--  tcg/tcg-opc.h                    |   96
-rw-r--r--  tcg/tcg.c                        |  338
-rw-r--r--  tcg/tcg.h                        |  166
-rw-r--r--  tcg/tci/tcg-target.c             |    2
-rw-r--r--  tcg/tci/tcg-target.h             |    2
-rw-r--r--  translate-all.c                  |   12
58 files changed, 992 insertions, 2629 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 3c3e9fef18..77edacf271 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -793,11 +793,6 @@ M: Andrzej Zaborowski <balrogg@gmail.com>
S: Maintained
F: tcg/arm/
-HPPA target
-M: Richard Henderson <rth@twiddle.net>
-S: Maintained
-F: tcg/hppa/
-
i386 target
M: qemu-devel@nongnu.org
S: Maintained
diff --git a/configure b/configure
index 23dbaaffcc..57ee62a696 100755
--- a/configure
+++ b/configure
@@ -429,9 +429,6 @@ case "$cpu" in
aarch64)
cpu="aarch64"
;;
- hppa|parisc|parisc64)
- cpu="hppa"
- ;;
mips*)
cpu="mips"
;;
@@ -3794,14 +3791,6 @@ echo "libs_softmmu=$libs_softmmu" >> $config_host_mak
echo "ARCH=$ARCH" >> $config_host_mak
-case "$cpu" in
- aarch64 | arm | i386 | x86_64 | x32 | ppc*)
- # The TCG interpreter currently does not support ld/st optimization.
- if test "$tcg_interpreter" = "no" ; then
- echo "CONFIG_QEMU_LDST_OPTIMIZATION=y" >> $config_host_mak
- fi
- ;;
-esac
if test "$debug_tcg" = "yes" ; then
echo "CONFIG_DEBUG_TCG=y" >> $config_host_mak
fi
diff --git a/include/exec/def-helper.h b/include/exec/def-helper.h
index 022a9ceb6a..73d51f9cf5 100644
--- a/include/exec/def-helper.h
+++ b/include/exec/def-helper.h
@@ -240,8 +240,7 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
#elif GEN_HELPER == 2
/* Register helpers. */
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-tcg_register_helper(HELPER(name), #name);
+#define DEF_HELPER_FLAGS_0(name, flags, ret) { HELPER(name), #name },
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
DEF_HELPER_FLAGS_0(name, flags, ret)
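With GEN_HELPER set to 2, each DEF_HELPER_* line now expands to a plain initializer instead of a tcg_register_helper() call, so a target can materialize its whole helper set as one static table. Below is a sketch of the consuming side; the TCGHelperInfo and all_helpers names are assumed from the "tcg: Put target helper data into an array" patch and are not shown in this hunk.

/* Sketch, assumed names: tcg.c is built once per target, so it can
   include that target's helper.h with GEN_HELPER == 2 and let every
   DEF_HELPER_* line expand to a { function, name } table entry. */
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
} TCGHelperInfo;

static const TCGHelperInfo all_helpers[] = {
#define GEN_HELPER 2
#include "helper.h"
};

/* tcg_context_init() then walks all_helpers once, inserting each
   entry into a GHashTable keyed by function pointer, replacing the
   per-target registration calls deleted later in this merge. */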
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 8dd15948d8..6ad05cacf5 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -320,36 +320,6 @@ extern uintptr_t tci_tb_ptr;
#define GETPC() (GETRA() - GETPC_ADJ)
-/* The LDST optimizations splits code generation into fast and slow path.
- In some implementations, we pass the "logical" return address manually;
- in others, we must infer the logical return from the true return. */
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-# if defined(__aarch64__)
-# define GETRA_LDST(RA) tcg_getra_ldst(RA)
-static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
-{
- int32_t b;
- ra += 4; /* skip one instruction */
- b = *(int32_t *)ra; /* load the branch insn */
- b = (b << 6) >> (6 - 2); /* extract the displacement */
- ra += b; /* apply the displacement */
- return ra;
-}
-# endif
-#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
-
-/* ??? Delete these once they are no longer used. */
-bool is_tcg_gen_code(uintptr_t pc_ptr);
-#ifdef GETRA_LDST
-# define GETRA_EXT() tcg_getra_ext(GETRA())
-static inline uintptr_t tcg_getra_ext(uintptr_t ra)
-{
- return is_tcg_gen_code(ra) ? GETRA_LDST(ra) : ra;
-}
-#else
-# define GETRA_EXT() GETRA()
-#endif
-
#if !defined(CONFIG_USER_ONLY)
void phys_mem_set_alloc(void *(*alloc)(ram_addr_t));
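The deleted aarch64 GETRA_LDST variant recovered the logical return address by decoding the branch insn that follows the call site: an A64 B instruction carries a 26-bit signed word offset in bits [25:0], and the shift pair sign-extends it and scales it by 4 in a single step. A standalone illustration of that arithmetic (plain C, not QEMU code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* B <label> with imm26 = -1: branch back one instruction,
       i.e. a byte offset of -4. */
    int32_t b = 0x17ffffff;

    /* Equivalent to the deleted "(b << 6) >> (6 - 2)": move the
       26-bit field to the top, then arithmetic-shift back by 4,
       sign-extending and multiplying by 4 at once.  The cast keeps
       the left shift well defined for negative values. */
    int32_t disp = (int32_t)((uint32_t)b << 6) >> (6 - 2);

    assert(disp == -4);
    return 0;
}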
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index 5bbc56afd5..c6a544069c 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -70,6 +70,48 @@
#define ADDR_READ addr_read
#endif
+#if DATA_SIZE == 8
+# define BSWAP(X) bswap64(X)
+#elif DATA_SIZE == 4
+# define BSWAP(X) bswap32(X)
+#elif DATA_SIZE == 2
+# define BSWAP(X) bswap16(X)
+#else
+# define BSWAP(X) (X)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define TGT_BE(X) (X)
+# define TGT_LE(X) BSWAP(X)
+#else
+# define TGT_BE(X) BSWAP(X)
+# define TGT_LE(X) (X)
+#endif
+
+#if DATA_SIZE == 1
+# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name helper_le_ld_name
+# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name helper_le_lds_name
+# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name helper_le_st_name
+#else
+# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
+# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
+# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define helper_te_ld_name helper_be_ld_name
+# define helper_te_st_name helper_be_st_name
+#else
+# define helper_te_ld_name helper_le_ld_name
+# define helper_te_st_name helper_le_st_name
+#endif
+
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
target_ulong addr,
@@ -89,18 +131,16 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
return val;
}
-/* handle all cases except unaligned access which span two pages */
#ifdef SOFTMMU_CODE_ACCESS
-static
+static __attribute__((unused))
#endif
-WORD_TYPE
-glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
+ uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
+ DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -124,7 +164,12 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
goto do_unaligned_access;
}
ioaddr = env->iotlb[mmu_idx][index];
- return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+
+ /* ??? Note that the io helpers always read data in the target
+ byte ordering. We should push the LE/BE request down into io. */
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ res = TGT_LE(res);
+ return res;
}
/* Handle slow unaligned access (it spans two pages or IO). */
@@ -132,7 +177,7 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
target_ulong addr1, addr2;
- DATA_TYPE res1, res2, res;
+ DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
#ifdef ALIGNED_ONLY
@@ -142,16 +187,94 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
- res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
- (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
- res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
- (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+ res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+ res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
-#ifdef TARGET_WORDS_BIGENDIAN
- res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
-#else
+
+ /* Little-endian combine. */
res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
+ return res;
+ }
+
+ /* Handle aligned access or unaligned access in the same page. */
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ }
+#endif
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+ res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
+#else
+ res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
+#endif
+ return res;
+}
+
+#if DATA_SIZE > 1
+#ifdef SOFTMMU_CODE_ACCESS
+static __attribute__((unused))
+#endif
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
+ uintptr_t retaddr)
+{
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ uintptr_t haddr;
+ DATA_TYPE res;
+
+ /* Adjust the given return address. */
+ retaddr -= GETPC_ADJ;
+
+ /* If the TLB entry is for a different page, reload and try again. */
+ if ((addr & TARGET_PAGE_MASK)
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ }
+#endif
+ tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ }
+
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ hwaddr ioaddr;
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+ ioaddr = env->iotlb[mmu_idx][index];
+
+ /* ??? Note that the io helpers always read data in the target
+ byte ordering. We should push the LE/BE request down into io. */
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ res = TGT_BE(res);
+ return res;
+ }
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (DATA_SIZE > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+ >= TARGET_PAGE_SIZE)) {
+ target_ulong addr1, addr2;
+ DATA_TYPE res1, res2;
+ unsigned shift;
+ do_unaligned_access:
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
+ addr1 = addr & ~(DATA_SIZE - 1);
+ addr2 = addr1 + DATA_SIZE;
+ /* Note the adjustment at the beginning of the function.
+ Undo that for the recursion. */
+ res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+ res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+ shift = (addr & (DATA_SIZE - 1)) * 8;
+
+ /* Big-endian combine. */
+ res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
return res;
}
@@ -163,16 +286,16 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
- /* Note that ldl_raw is defined with type "int". */
- return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
+ res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
+ return res;
}
+#endif /* DATA_SIZE > 1 */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
int mmu_idx)
{
- return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
- GETRA_EXT());
+ return helper_te_ld_name (env, addr, mmu_idx, GETRA());
}
#ifndef SOFTMMU_CODE_ACCESS
@@ -180,14 +303,19 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
/* Provide signed versions of the load routines as well. We can of course
avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
-WORD_TYPE
-glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr)
+{
+ return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
+}
+
+# if DATA_SIZE > 1
+WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr)
{
- return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
- (env, addr, mmu_idx, retaddr);
+ return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
+# endif
#endif
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
@@ -208,10 +336,8 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
io_mem_write(mr, physaddr, val, 1 << SHIFT);
}
-void
-glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr, DATA_TYPE val,
- int mmu_idx, uintptr_t retaddr)
+void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+ int mmu_idx, uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -239,6 +365,10 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
goto do_unaligned_access;
}
ioaddr = env->iotlb[mmu_idx][index];
+
+ /* ??? Note that the io helpers always read data in the target
+ byte ordering. We should push the LE/BE request down into io. */
+ val = TGT_LE(val);
glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
return;
}
@@ -256,11 +386,84 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
for (i = DATA_SIZE - 1; i >= 0; i--) {
-#ifdef TARGET_WORDS_BIGENDIAN
- uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-#else
+ /* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
+ /* Note the adjustment at the beginning of the function.
+ Undo that for the recursion. */
+ glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+ mmu_idx, retaddr + GETPC_ADJ);
+ }
+ return;
+ }
+
+ /* Handle aligned access or unaligned access in the same page. */
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+ }
+#endif
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+ glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+#else
+ glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
+}
+
+#if DATA_SIZE > 1
+void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ uintptr_t haddr;
+
+ /* Adjust the given return address. */
+ retaddr -= GETPC_ADJ;
+
+ /* If the TLB entry is for a different page, reload and try again. */
+ if ((addr & TARGET_PAGE_MASK)
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+ }
+#endif
+ tlb_fill(env, addr, 1, mmu_idx, retaddr);
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ }
+
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ hwaddr ioaddr;
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+ ioaddr = env->iotlb[mmu_idx][index];
+
+ /* ??? Note that the io helpers always read data in the target
+ byte ordering. We should push the LE/BE request down into io. */
+ val = TGT_BE(val);
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ return;
+ }
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (DATA_SIZE > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+ >= TARGET_PAGE_SIZE)) {
+ int i;
+ do_unaligned_access:
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+ /* XXX: not efficient, but simple */
+ /* Note: relies on the fact that tlb_fill() does not remove the
+ * previous page from the TLB cache. */
+ for (i = DATA_SIZE - 1; i >= 0; i--) {
+ /* Big-endian extract. */
+ uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
@@ -277,15 +480,15 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
+ glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
+#endif /* DATA_SIZE > 1 */
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
DATA_TYPE val, int mmu_idx)
{
- glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
- GETRA_EXT());
+ helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
@@ -301,3 +504,16 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
+#undef BSWAP
+#undef TGT_BE
+#undef TGT_LE
+#undef CPU_BE
+#undef CPU_LE
+#undef helper_le_ld_name
+#undef helper_be_ld_name
+#undef helper_le_lds_name
+#undef helper_be_lds_name
+#undef helper_le_st_name
+#undef helper_be_st_name
+#undef helper_te_ld_name
+#undef helper_te_st_name
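Two things are worth pulling out of the rewritten template. Each access now exists in explicit little- and big-endian flavors (plus a _te alias for the target's own order), and an unaligned access that spans two pages is serviced by loading the two aligned halves recursively and recombining them with the shift pair shown above. Here is a self-contained illustration of the little-endian combine (plain C, not QEMU code); the big-endian helper mirrors it with the two shifts swapped.

#include <stdint.h>
#include <stdio.h>

/* Rebuild an unaligned 4-byte little-endian load at 'addr' from the
   two aligned words that straddle it.  For a genuinely unaligned
   address shift is never 0, so both 32-bit shifts stay defined. */
static uint32_t le_combine(uint32_t res1, uint32_t res2, uint32_t addr)
{
    unsigned shift = (addr & 3) * 8;
    return (res1 >> shift) | (res2 << (32 - shift));
}

int main(void)
{
    /* Memory bytes 0..7: 00 11 22 33 44 55 66 77. */
    uint32_t lo = 0x33221100, hi = 0x77665544;

    /* A load at address 2 must see bytes 22 33 44 55, i.e. the
       little-endian word 0x55443322. */
    printf("0x%08x\n", le_combine(lo, hi, 2));
    return 0;
}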
diff --git a/target-alpha/helper.h b/target-alpha/helper.h
index 732b701d53..5a0e78cefb 100644
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -114,7 +114,7 @@ DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(tb_flush, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_1(halt, void, i64);
+DEF_HELPER_1(halt, void, i64)
DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64)
DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64)
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 28ce4363f1..9cb8084057 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -140,10 +140,6 @@ void alpha_translate_init(void)
offsetof(CPUAlphaState, usp), "usp");
#endif
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
-
done_init = 1;
}
diff --git a/target-arm/helper.h b/target-arm/helper.h
index 63ae13acff..cac9564f5f 100644
--- a/target-arm/helper.h
+++ b/target-arm/helper.h
@@ -247,10 +247,10 @@ DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32);
-DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32);
-DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32);
-DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64);
+DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 998bde268d..5f003e785e 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -115,9 +115,6 @@ void arm_translate_init(void)
#endif
a64_translate_init();
-
-#define GEN_HELPER 2
-#include "helper.h"
}
static inline TCGv_i32 load_cpu_offset(int offset)
diff --git a/target-cris/helper.h b/target-cris/helper.h
index 8e8365cf69..0ac31f5670 100644
--- a/target-cris/helper.h
+++ b/target-cris/helper.h
@@ -4,14 +4,14 @@ DEF_HELPER_2(raise_exception, void, env, i32)
DEF_HELPER_2(tlb_flush_pid, void, env, i32)
DEF_HELPER_2(spc_write, void, env, i32)
DEF_HELPER_3(dump, void, i32, i32, i32)
-DEF_HELPER_1(rfe, void, env);
-DEF_HELPER_1(rfn, void, env);
+DEF_HELPER_1(rfe, void, env)
+DEF_HELPER_1(rfn, void, env)
DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32)
DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32)
-DEF_HELPER_FLAGS_1(lz, TCG_CALL_NO_SE, i32, i32);
-DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32);
+DEF_HELPER_FLAGS_1(lz, TCG_CALL_NO_SE, i32, i32)
+DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(evaluate_flags_mulu, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 617e1b4242..5faa44c1ea 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -3480,9 +3480,6 @@ void cris_initialize_tcg(void)
{
int i;
-#define GEN_HELPER 2
-#include "helper.h"
-
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
cc_x = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUCRISState, cc_x), "cc_x");
diff --git a/target-i386/translate.c b/target-i386/translate.c
index be74ebc278..eb0ea93dbb 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -8261,10 +8261,6 @@ void optimize_flags_init(void)
cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
-
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
diff --git a/target-m68k/helper.c b/target-m68k/helper.c
index a8f32fc88b..a364eb1e5c 100644
--- a/target-m68k/helper.c
+++ b/target-m68k/helper.c
@@ -21,7 +21,7 @@
#include "cpu.h"
#include "exec/gdbstub.h"
-#include "helpers.h"
+#include "helper.h"
#define SIGNBIT (1u << 31)
diff --git a/target-m68k/helpers.h b/target-m68k/helper.h
index 2b024502ba..2b024502ba 100644
--- a/target-m68k/helpers.h
+++ b/target-m68k/helper.h
diff --git a/target-m68k/op_helper.c b/target-m68k/op_helper.c
index 30f7d8b1ab..bbbfd7f130 100644
--- a/target-m68k/op_helper.c
+++ b/target-m68k/op_helper.c
@@ -17,7 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
-#include "helpers.h"
+#include "helper.h"
#if defined(CONFIG_USER_ONLY)
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index 0be0a96732..f54b94a53f 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -23,9 +23,9 @@
#include "tcg-op.h"
#include "qemu/log.h"
-#include "helpers.h"
+#include "helper.h"
#define GEN_HELPER 1
-#include "helpers.h"
+#include "helper.h"
//#define DEBUG_DISPATCH 1
@@ -108,9 +108,6 @@ void m68k_tcg_init(void)
NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
-
-#define GEN_HELPER 2
-#include "helpers.h"
}
static inline void qemu_assert(int cond, const char *msg)
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index 0673176957..1b937b3f0d 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -2024,8 +2024,6 @@ void mb_tcg_init(void)
offsetof(CPUMBState, sregs[i]),
special_regnames[i]);
}
-#define GEN_HELPER 2
-#include "helper.h"
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
diff --git a/target-mips/helper.h b/target-mips/helper.h
index ed75e2c9f2..1a8b86dea5 100644
--- a/target-mips/helper.h
+++ b/target-mips/helper.h
@@ -148,7 +148,7 @@ DEF_HELPER_2(mtc0_taghi, void, env, tl)
DEF_HELPER_2(mtc0_datahi, void, env, tl)
/* MIPS MT functions */
-DEF_HELPER_2(mftgpr, tl, env, i32);
+DEF_HELPER_2(mftgpr, tl, env, i32)
DEF_HELPER_2(mftlo, tl, env, i32)
DEF_HELPER_2(mfthi, tl, env, i32)
DEF_HELPER_2(mftacx, tl, env, i32)
@@ -165,11 +165,11 @@ DEF_HELPER_1(evpe, tl, env)
#endif /* !CONFIG_USER_ONLY */
/* microMIPS functions */
-DEF_HELPER_4(lwm, void, env, tl, tl, i32);
-DEF_HELPER_4(swm, void, env, tl, tl, i32);
+DEF_HELPER_4(lwm, void, env, tl, tl, i32)
+DEF_HELPER_4(swm, void, env, tl, tl, i32)
#ifdef TARGET_MIPS64
-DEF_HELPER_4(ldm, void, env, tl, tl, i32);
-DEF_HELPER_4(sdm, void, env, tl, tl, i32);
+DEF_HELPER_4(ldm, void, env, tl, tl, i32)
+DEF_HELPER_4(sdm, void, env, tl, tl, i32)
#endif
DEF_HELPER_2(fork, void, tl, tl)
@@ -615,7 +615,7 @@ DEF_HELPER_FLAGS_4(dmsubu, 0, void, tl, tl, i32, env)
DEF_HELPER_FLAGS_1(bitrev, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_3(insv, 0, tl, env, tl, tl)
#if defined(TARGET_MIPS64)
-DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl);
+DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl)
#endif
/* DSP Compare-Pick Sub-class insns */
diff --git a/target-mips/translate.c b/target-mips/translate.c
index dea3956c60..67f326b205 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -15886,10 +15886,6 @@ void mips_tcg_init(void)
offsetof(CPUMIPSState, active_fpu.fcr31),
"fcr31");
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
-
inited = 1;
}
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index 723b77d3b4..8908a2e32b 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -110,8 +110,6 @@ void openrisc_translate_init(void)
offsetof(CPUOpenRISCState, gpr[i]),
regnames[i]);
}
-#define GEN_HELPER 2
-#include "helper.h"
}
/* Writeback SR_F transaltion-space to execution-space. */
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 56814b501f..6d282bb32d 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -168,8 +168,8 @@ DEF_HELPER_3(vslo, void, avr, avr, avr)
DEF_HELPER_3(vsro, void, avr, avr, avr)
DEF_HELPER_3(vaddcuw, void, avr, avr, avr)
DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
-DEF_HELPER_2(lvsl, void, avr, tl);
-DEF_HELPER_2(lvsr, void, avr, tl);
+DEF_HELPER_2(lvsl, void, avr, tl)
+DEF_HELPER_2(lvsr, void, avr, tl)
DEF_HELPER_4(vaddsbs, void, env, avr, avr, avr)
DEF_HELPER_4(vaddshs, void, env, avr, avr, avr)
DEF_HELPER_4(vaddsws, void, env, avr, avr, avr)
@@ -220,7 +220,7 @@ DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr)
DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr)
-DEF_HELPER_2(mtvscr, void, env, avr);
+DEF_HELPER_2(mtvscr, void, env, avr)
DEF_HELPER_3(lvebx, void, env, avr, tl)
DEF_HELPER_3(lvehx, void, env, avr, tl)
DEF_HELPER_3(lvewx, void, env, avr, tl)
@@ -349,7 +349,7 @@ DEF_HELPER_2(load_slb_vsid, tl, env, tl)
DEF_HELPER_FLAGS_1(slbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl)
#endif
-DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl);
+DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl)
@@ -367,7 +367,7 @@ DEF_HELPER_3(divo, tl, env, tl, tl)
DEF_HELPER_3(divs, tl, env, tl, tl)
DEF_HELPER_3(divso, tl, env, tl, tl)
-DEF_HELPER_2(load_dcr, tl, env, tl);
+DEF_HELPER_2(load_dcr, tl, env, tl)
DEF_HELPER_3(store_dcr, void, env, tl, tl)
DEF_HELPER_2(load_dump_spr, void, env, i32)
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 9c59f69ee1..66c777174c 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -175,10 +175,6 @@ void ppc_translate_init(void)
cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUPPCState, access_type), "access_type");
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
-
done_init = 1;
}
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index afe90eb8be..bc99a378a7 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -188,10 +188,6 @@ void s390x_translate_init(void)
offsetof(CPUS390XState, fregs[i].d),
cpu_reg_names[i + 16]);
}
-
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
}
static TCGv_i64 load_reg(int reg)
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index c06b29f1dc..2272eb0beb 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -143,10 +143,6 @@ void sh4_translate_init(void)
offsetof(CPUSH4State, fregs[i]),
fregnames[i]);
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
-
done_init = 1;
}
diff --git a/target-sparc/helper.h b/target-sparc/helper.h
index 15f73283fa..2a771b2093 100644
--- a/target-sparc/helper.h
+++ b/target-sparc/helper.h
@@ -103,7 +103,7 @@ DEF_HELPER_3(fmuls, f32, env, f32, f32)
DEF_HELPER_3(fdivs, f32, env, f32, f32)
DEF_HELPER_3(fsmuld, f64, env, f32, f32)
-DEF_HELPER_3(fdmulq, void, env, f64, f64);
+DEF_HELPER_3(fdmulq, void, env, f64, f64)
DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32)
DEF_HELPER_2(fitod, f64, env, s32)
@@ -156,22 +156,22 @@ DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \
i32, i32, i32)
-VIS_HELPER(padd);
-VIS_HELPER(psub);
+VIS_HELPER(padd)
+VIS_HELPER(psub)
#define VIS_CMPHELPER(name) \
DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \
i64, i64, i64) \
DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \
i64, i64, i64)
-VIS_CMPHELPER(cmpgt);
-VIS_CMPHELPER(cmpeq);
-VIS_CMPHELPER(cmple);
-VIS_CMPHELPER(cmpne);
+VIS_CMPHELPER(cmpgt)
+VIS_CMPHELPER(cmpeq)
+VIS_CMPHELPER(cmple)
+VIS_CMPHELPER(cmpne)
#endif
#undef F_HELPER_0_1
#undef VIS_HELPER
#undef VIS_CMPHELPER
-DEF_HELPER_1(compute_psr, void, env);
-DEF_HELPER_1(compute_C_icc, i32, env);
+DEF_HELPER_1(compute_psr, void, env)
+DEF_HELPER_1(compute_C_icc, i32, env)
#include "exec/def-helper.h"
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 36615f1979..dce64c3c4a 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -5456,11 +5456,6 @@ void gen_intermediate_code_init(CPUSPARCState *env)
offsetof(CPUSPARCState, fpr[i]),
fregnames[i]);
}
-
- /* register helpers */
-
-#define GEN_HELPER 2
-#include "helper.h"
}
}
diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c
index 1246895f86..4572890ffa 100644
--- a/target-unicore32/translate.c
+++ b/target-unicore32/translate.c
@@ -74,9 +74,6 @@ void uc32_translate_init(void)
cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUUniCore32State, regs[i]), regnames[i]);
}
-
-#define GEN_HELPER 2
-#include "helper.h"
}
static int num_temps;
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index 24343bdf60..06641bb7d0 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -238,8 +238,6 @@ void xtensa_translate_init(void)
uregnames[i].name);
}
}
-#define GEN_HELPER 2
-#include "helper.h"
}
static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
diff --git a/tcg/README b/tcg/README
index 063aeb95ea..f1782123b7 100644
--- a/tcg/README
+++ b/tcg/README
@@ -412,30 +412,25 @@ current TB was linked to this TB. Otherwise execute the next
instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued
at most once with each slot index per TB.
-* qemu_ld8u t0, t1, flags
-qemu_ld8s t0, t1, flags
-qemu_ld16u t0, t1, flags
-qemu_ld16s t0, t1, flags
-qemu_ld32 t0, t1, flags
-qemu_ld32u t0, t1, flags
-qemu_ld32s t0, t1, flags
-qemu_ld64 t0, t1, flags
-
-Load data at the QEMU CPU address t1 into t0. t1 has the QEMU CPU address
-type. 'flags' contains the QEMU memory index (selects user or kernel access)
-for example.
-
-Note that "qemu_ld32" implies a 32-bit result, while "qemu_ld32u" and
-"qemu_ld32s" imply a 64-bit result appropriately extended from 32 bits.
-
-* qemu_st8 t0, t1, flags
-qemu_st16 t0, t1, flags
-qemu_st32 t0, t1, flags
-qemu_st64 t0, t1, flags
-
-Store the data t0 at the QEMU CPU Address t1. t1 has the QEMU CPU
-address type. 'flags' contains the QEMU memory index (selects user or
-kernel access) for example.
+* qemu_ld_i32/i64 t0, t1, flags, memidx
+* qemu_st_i32/i64 t0, t1, flags, memidx
+
+Load data at the guest address t1 into t0, or store data in t0 at guest
+address t1. The _i32/_i64 size applies to the size of the input/output
+register t0 only. The address t1 is always sized according to the guest,
+and the width of the memory operation is controlled by flags.
+
+Both t0 and t1 may be split into little-endian ordered pairs of registers
+if dealing with 64-bit quantities on a 32-bit host.
+
+The memidx selects the qemu tlb index to use (e.g. user or kernel access).
+The flags are the TCGMemOp bits, selecting the sign, width, and endianness
+of the memory access.
+
+For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a
+64-bit memory access specified in flags.
+
+*********
Note 1: Some shortcuts are defined when the last operand is known to be
a constant (e.g. addi for add, movi for mov).
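At the tcg-op.h level the documented ops surface as tcg_gen_qemu_ld/st_i32/i64, taking the value register, the guest address, the TLB index, and a TCGMemOp. A minimal usage sketch, assuming the MO_* names from this series:

/* Sketch: emit a 16-bit, sign-extended, target-endian guest load
   into a 32-bit temp.  'addr' and 'mem_idx' are assumed to come
   from the surrounding translator context. */
TCGv_i32 val = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(val, addr, mem_idx, MO_TESW);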
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 6379df1f68..04d7ae328d 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -10,6 +10,7 @@
* See the COPYING file in the top-level directory for details.
*/
+#include "tcg-be-ldst.h"
#include "qemu/bitops.h"
#ifndef NDEBUG
@@ -778,22 +779,24 @@ static inline void tcg_out_nop(TCGContext *s)
}
#ifdef CONFIG_SOFTMMU
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- int mmu_idx) */
+/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ * int mmu_idx, uintptr_t ra)
+ */
static const void * const qemu_ld_helpers[4] = {
- helper_ldb_mmu,
- helper_ldw_mmu,
- helper_ldl_mmu,
- helper_ldq_mmu,
+ helper_ret_ldub_mmu,
+ helper_ret_lduw_mmu,
+ helper_ret_ldul_mmu,
+ helper_ret_ldq_mmu,
};
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- uintxx_t val, int mmu_idx) */
+/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ * uintxx_t val, int mmu_idx, uintptr_t ra)
+ */
static const void * const qemu_st_helpers[4] = {
- helper_stb_mmu,
- helper_stw_mmu,
- helper_stl_mmu,
- helper_stq_mmu,
+ helper_ret_stb_mmu,
+ helper_ret_stw_mmu,
+ helper_ret_stl_mmu,
+ helper_ret_stq_mmu,
};
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -802,6 +805,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0);
tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X3, (tcg_target_long)lb->raddr);
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
(tcg_target_long)qemu_ld_helpers[lb->opc & 3]);
tcg_out_callr(s, TCG_REG_TMP);
@@ -822,6 +826,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
tcg_out_movr(s, 1, TCG_REG_X2, lb->datalo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X4, (tcg_target_long)lb->raddr);
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
(tcg_target_long)qemu_st_helpers[lb->opc & 3]);
tcg_out_callr(s, TCG_REG_TMP);
@@ -830,33 +835,13 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_goto(s, (tcg_target_long)lb->raddr);
}
-void tcg_out_tb_finalize(TCGContext *s)
-{
- int i;
- for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
- TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
- if (label->is_ld) {
- tcg_out_qemu_ld_slow_path(s, label);
- } else {
- tcg_out_qemu_st_slow_path(s, label);
- }
- }
-}
-
static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
TCGReg data_reg, TCGReg addr_reg,
int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
{
- int idx;
- TCGLabelQemuLdst *label;
-
- if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
- tcg_abort();
- }
+ TCGLabelQemuLdst *label = new_ldst_label(s);
- idx = s->nb_qemu_ldst_labels++;
- label = &s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
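Both deleted blocks reappear exactly once in the new tcg-be-ldst.h, shared by every backend that emits slow-path labels. A sketch reconstructed from the per-backend code removed above (the header's exact field and member layout may differ):

/* Allocate one slow-path label record, replacing the open-coded
   bounds check each backend used to carry. */
static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
{
    if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
        tcg_abort();
    }
    return &s->qemu_ldst_labels[s->nb_qemu_ldst_labels++];
}

/* Generate TB finalization at the end of block: emit every pending
   load/store slow path exactly as the deleted copies did. */
static void tcg_out_tb_finalize(TCGContext *s)
{
    int i;
    for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
        TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
        if (label->is_ld) {
            tcg_out_qemu_ld_slow_path(s, label);
        } else {
            tcg_out_qemu_st_slow_path(s, label);
        }
    }
}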
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index d3a1bc2437..82ad919518 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -96,6 +96,8 @@ enum {
TCG_AREG0 = TCG_REG_X19,
};
+#define TCG_TARGET_HAS_new_ldst 0
+
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
__builtin___clear_cache((char *)start, (char *)stop);
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 622cc49aa7..c0e14661b2 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-ldst.h"
+
/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
#ifndef __ARM_ARCH
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
@@ -1243,15 +1245,8 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
int addrhi_reg, int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
{
- int idx;
- TCGLabelQemuLdst *label;
-
- if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
- tcg_abort();
- }
+ TCGLabelQemuLdst *label = new_ldst_label(s);
- idx = s->nb_qemu_ldst_labels++;
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -1968,22 +1963,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
-#ifdef CONFIG_SOFTMMU
-/* Generate TB finalization at the end of block. */
-void tcg_out_tb_finalize(TCGContext *s)
-{
- int i;
- for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
- TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
- if (label->is_ld) {
- tcg_out_qemu_ld_slow_path(s, label);
- } else {
- tcg_out_qemu_st_slow_path(s, label);
- }
- }
-}
-#endif /* SOFTMMU */
-
static const TCGTargetOpDef arm_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index 9482bfa993..25e1e2876f 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -85,6 +85,8 @@ extern bool use_idiv_instructions;
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
#define TCG_TARGET_HAS_rem_i32 0
+#define TCG_TARGET_HAS_new_ldst 0
+
extern bool tcg_target_deposit_valid(int ofs, int len);
#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
deleted file mode 100644
index 236b39c31f..0000000000
--- a/tcg/hppa/tcg-target.c
+++ /dev/null
@@ -1,1831 +0,0 @@
-/*
- * Tiny Code Generator for QEMU
- *
- * Copyright (c) 2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#if TCG_TARGET_REG_BITS != 32
-#error unsupported
-#endif
-
-#ifndef NDEBUG
-static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
- "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
- "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
- "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
- "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
-};
-#endif
-
-/* This is an 8 byte temp slot in the stack frame. */
-#define STACK_TEMP_OFS -16
-
-#ifdef CONFIG_USE_GUEST_BASE
-#define TCG_GUEST_BASE_REG TCG_REG_R16
-#else
-#define TCG_GUEST_BASE_REG TCG_REG_R0
-#endif
-
-static const int tcg_target_reg_alloc_order[] = {
- TCG_REG_R4,
- TCG_REG_R5,
- TCG_REG_R6,
- TCG_REG_R7,
- TCG_REG_R8,
- TCG_REG_R9,
- TCG_REG_R10,
- TCG_REG_R11,
- TCG_REG_R12,
- TCG_REG_R13,
-
- TCG_REG_R17,
- TCG_REG_R14,
- TCG_REG_R15,
- TCG_REG_R16,
-
- TCG_REG_R26,
- TCG_REG_R25,
- TCG_REG_R24,
- TCG_REG_R23,
-
- TCG_REG_RET0,
- TCG_REG_RET1,
-};
-
-static const int tcg_target_call_iarg_regs[4] = {
- TCG_REG_R26,
- TCG_REG_R25,
- TCG_REG_R24,
- TCG_REG_R23,
-};
-
-static const int tcg_target_call_oarg_regs[2] = {
- TCG_REG_RET0,
- TCG_REG_RET1,
-};
-
-/* True iff val fits a signed field of width BITS. */
-static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
-{
- return (val << ((sizeof(tcg_target_long) * 8 - bits))
- >> (sizeof(tcg_target_long) * 8 - bits)) == val;
-}
-
-/* True iff depi can be used to compute (reg | MASK).
- Accept a bit pattern like:
- 0....01....1
- 1....10....0
- 0..01..10..0
- Copied from gcc sources. */
-static inline int or_mask_p(tcg_target_ulong mask)
-{
- if (mask == 0 || mask == -1) {
- return 0;
- }
- mask += mask & -mask;
- return (mask & (mask - 1)) == 0;
-}
-
-/* True iff depi or extru can be used to compute (reg & mask).
- Accept a bit pattern like these:
- 0....01....1
- 1....10....0
- 1..10..01..1
- Copied from gcc sources. */
-static inline int and_mask_p(tcg_target_ulong mask)
-{
- return or_mask_p(~mask);
-}
-
-static int low_sign_ext(int val, int len)
-{
- return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
-}
-
-static int reassemble_12(int as12)
-{
- return (((as12 & 0x800) >> 11) |
- ((as12 & 0x400) >> 8) |
- ((as12 & 0x3ff) << 3));
-}
-
-static int reassemble_17(int as17)
-{
- return (((as17 & 0x10000) >> 16) |
- ((as17 & 0x0f800) << 5) |
- ((as17 & 0x00400) >> 8) |
- ((as17 & 0x003ff) << 3));
-}
-
-static int reassemble_21(int as21)
-{
- return (((as21 & 0x100000) >> 20) |
- ((as21 & 0x0ffe00) >> 8) |
- ((as21 & 0x000180) << 7) |
- ((as21 & 0x00007c) << 14) |
- ((as21 & 0x000003) << 12));
-}
-
-/* ??? Bizzarely, there is no PCREL12F relocation type. I guess all
- such relocations are simply fully handled by the assembler. */
-#define R_PARISC_PCREL12F R_PARISC_NONE
-
-static void patch_reloc(uint8_t *code_ptr, int type,
- intptr_t value, intptr_t addend)
-{
- uint32_t *insn_ptr = (uint32_t *)code_ptr;
- uint32_t insn = *insn_ptr;
- intptr_t pcrel;
-
- value += addend;
- pcrel = (value - ((intptr_t)code_ptr + 8)) >> 2;
-
- switch (type) {
- case R_PARISC_PCREL12F:
- assert(check_fit_tl(pcrel, 12));
- /* ??? We assume all patches are forward. See tcg_out_brcond
- re setting the NUL bit on the branch and eliding the nop. */
- assert(pcrel >= 0);
- insn &= ~0x1ffdu;
- insn |= reassemble_12(pcrel);
- break;
- case R_PARISC_PCREL17F:
- assert(check_fit_tl(pcrel, 17));
- insn &= ~0x1f1ffdu;
- insn |= reassemble_17(pcrel);
- break;
- default:
- tcg_abort();
- }
-
- *insn_ptr = insn;
-}
-
-/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
-{
- const char *ct_str;
-
- ct_str = *pct_str;
- switch (ct_str[0]) {
- case 'r':
- ct->ct |= TCG_CT_REG;
- tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
- break;
- case 'L': /* qemu_ld/st constraint */
- ct->ct |= TCG_CT_REG;
- tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
- break;
- case 'Z':
- ct->ct |= TCG_CT_CONST_0;
- break;
- case 'I':
- ct->ct |= TCG_CT_CONST_S11;
- break;
- case 'J':
- ct->ct |= TCG_CT_CONST_S5;
- break;
- case 'K':
- ct->ct |= TCG_CT_CONST_MS11;
- break;
- case 'M':
- ct->ct |= TCG_CT_CONST_AND;
- break;
- case 'O':
- ct->ct |= TCG_CT_CONST_OR;
- break;
- default:
- return -1;
- }
- ct_str++;
- *pct_str = ct_str;
- return 0;
-}
-
-/* test if a constant matches the constraint */
-static int tcg_target_const_match(tcg_target_long val,
- const TCGArgConstraint *arg_ct)
-{
- int ct = arg_ct->ct;
- if (ct & TCG_CT_CONST) {
- return 1;
- } else if (ct & TCG_CT_CONST_0) {
- return val == 0;
- } else if (ct & TCG_CT_CONST_S5) {
- return check_fit_tl(val, 5);
- } else if (ct & TCG_CT_CONST_S11) {
- return check_fit_tl(val, 11);
- } else if (ct & TCG_CT_CONST_MS11) {
- return check_fit_tl(-val, 11);
- } else if (ct & TCG_CT_CONST_AND) {
- return and_mask_p(val);
- } else if (ct & TCG_CT_CONST_OR) {
- return or_mask_p(val);
- }
- return 0;
-}
-
-#define INSN_OP(x) ((x) << 26)
-#define INSN_EXT3BR(x) ((x) << 13)
-#define INSN_EXT3SH(x) ((x) << 10)
-#define INSN_EXT4(x) ((x) << 6)
-#define INSN_EXT5(x) (x)
-#define INSN_EXT6(x) ((x) << 6)
-#define INSN_EXT7(x) ((x) << 6)
-#define INSN_EXT8A(x) ((x) << 6)
-#define INSN_EXT8B(x) ((x) << 5)
-#define INSN_T(x) (x)
-#define INSN_R1(x) ((x) << 16)
-#define INSN_R2(x) ((x) << 21)
-#define INSN_DEP_LEN(x) (32 - (x))
-#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
-#define INSN_SHDEP_P(x) ((x) << 5)
-#define INSN_COND(x) ((x) << 13)
-#define INSN_IM11(x) low_sign_ext(x, 11)
-#define INSN_IM14(x) low_sign_ext(x, 14)
-#define INSN_IM5(x) (low_sign_ext(x, 5) << 16)
-
-#define COND_NEVER 0
-#define COND_EQ 1
-#define COND_LT 2
-#define COND_LE 3
-#define COND_LTU 4
-#define COND_LEU 5
-#define COND_SV 6
-#define COND_OD 7
-#define COND_FALSE 8
-
-#define INSN_ADD (INSN_OP(0x02) | INSN_EXT6(0x18))
-#define INSN_ADDC (INSN_OP(0x02) | INSN_EXT6(0x1c))
-#define INSN_ADDI (INSN_OP(0x2d))
-#define INSN_ADDIL (INSN_OP(0x0a))
-#define INSN_ADDL (INSN_OP(0x02) | INSN_EXT6(0x28))
-#define INSN_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
-#define INSN_ANDCM (INSN_OP(0x02) | INSN_EXT6(0x00))
-#define INSN_COMCLR (INSN_OP(0x02) | INSN_EXT6(0x22))
-#define INSN_COMICLR (INSN_OP(0x24))
-#define INSN_DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
-#define INSN_DEPI (INSN_OP(0x35) | INSN_EXT3SH(7))
-#define INSN_EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
-#define INSN_EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
-#define INSN_LDIL (INSN_OP(0x08))
-#define INSN_LDO (INSN_OP(0x0d))
-#define INSN_MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
-#define INSN_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
-#define INSN_SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
-#define INSN_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
-#define INSN_SUBB (INSN_OP(0x02) | INSN_EXT6(0x14))
-#define INSN_SUBI (INSN_OP(0x25))
-#define INSN_VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
-#define INSN_VEXTRU (INSN_OP(0x34) | INSN_EXT3SH(4))
-#define INSN_VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
-#define INSN_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
-#define INSN_ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
-#define INSN_ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
-
-#define INSN_BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
-#define INSN_BL_N (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
-#define INSN_BLR (INSN_OP(0x3a) | INSN_EXT3BR(2))
-#define INSN_BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
-#define INSN_BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
-#define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))
-
-#define INSN_LDB (INSN_OP(0x10))
-#define INSN_LDH (INSN_OP(0x11))
-#define INSN_LDW (INSN_OP(0x12))
-#define INSN_LDWM (INSN_OP(0x13))
-#define INSN_FLDDS (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))
-
-#define INSN_LDBX (INSN_OP(0x03) | INSN_EXT4(0))
-#define INSN_LDHX (INSN_OP(0x03) | INSN_EXT4(1))
-#define INSN_LDWX (INSN_OP(0x03) | INSN_EXT4(2))
-
-#define INSN_STB (INSN_OP(0x18))
-#define INSN_STH (INSN_OP(0x19))
-#define INSN_STW (INSN_OP(0x1a))
-#define INSN_STWM (INSN_OP(0x1b))
-#define INSN_FSTDS (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))
-
-#define INSN_COMBT (INSN_OP(0x20))
-#define INSN_COMBF (INSN_OP(0x22))
-#define INSN_COMIBT (INSN_OP(0x21))
-#define INSN_COMIBF (INSN_OP(0x23))
-
-/* supplied by libgcc */
-extern void *__canonicalize_funcptr_for_compare(const void *);
-
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
-{
- /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
- but hppa-dis.c is unaware of this definition */
- if (ret != arg) {
- tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
- | INSN_R2(TCG_REG_R0));
- }
-}
-
-static void tcg_out_movi(TCGContext *s, TCGType type,
- TCGReg ret, tcg_target_long arg)
-{
- if (check_fit_tl(arg, 14)) {
- tcg_out32(s, INSN_LDO | INSN_R1(ret)
- | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
- } else {
- uint32_t hi, lo;
- hi = arg >> 11;
- lo = arg & 0x7ff;
-
- tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
- if (lo) {
- tcg_out32(s, INSN_LDO | INSN_R1(ret)
- | INSN_R2(ret) | INSN_IM14(lo));
- }
- }
-}
-
-static void tcg_out_ldst(TCGContext *s, int ret, int addr,
- tcg_target_long offset, int op)
-{
- if (!check_fit_tl(offset, 14)) {
- uint32_t hi, lo, op;
-
- hi = offset >> 11;
- lo = offset & 0x7ff;
-
- if (addr == TCG_REG_R0) {
- op = INSN_LDIL | INSN_R2(TCG_REG_R1);
- } else {
- op = INSN_ADDIL | INSN_R2(addr);
- }
- tcg_out32(s, op | reassemble_21(hi));
-
- addr = TCG_REG_R1;
- offset = lo;
- }
-
- if (ret != addr || offset != 0 || op != INSN_LDO) {
- tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
- }
-}
-
-/* This function is required by tcg.c. */
-static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg arg1, intptr_t arg2)
-{
- tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
-}
-
-/* This function is required by tcg.c. */
-static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
- TCGReg arg1, intptr_t arg2)
-{
- tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
-}
-
-static void tcg_out_ldst_index(TCGContext *s, int data,
- int base, int index, int op)
-{
- tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
-}
-
-static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
- tcg_target_long val)
-{
- tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
-}
-
-/* This function is required by tcg.c. */
-static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
-{
- tcg_out_addi2(s, reg, reg, val);
-}
-
-static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
-{
- tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
-}
-
-static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
- tcg_target_long val, int op)
-{
- assert(check_fit_tl(val, 11));
- tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
-}
-
-static inline void tcg_out_nop(TCGContext *s)
-{
- tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
-}
-
-static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
-{
- tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
-}
-
-/* Extract LEN bits at position OFS from ARG and place in RET.
- Note that here the bit ordering is reversed from the PA-RISC
- standard, such that the right-most bit is 0. */
-static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
- unsigned ofs, unsigned len, int sign)
-{
- assert(ofs < 32 && len <= 32 - ofs);
- tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
- | INSN_R1(ret) | INSN_R2(arg)
- | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
-}
-
-/* Likewise with OFS interpreted little-endian. */
-static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
- unsigned ofs, unsigned len)
-{
- assert(ofs < 32 && len <= 32 - ofs);
- tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
- | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
-}
-
-static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
- unsigned ofs, unsigned len)
-{
- assert(ofs < 32 && len <= 32 - ofs);
- tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
- | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
-}
-
-static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
- unsigned count)
-{
- assert(count < 32);
- tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
- | INSN_SHDEP_CP(count));
-}
-
-static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
-{
- tcg_out_mtctl_sar(s, creg);
- tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
-}
-
-static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
-{
- int bs0, bs1;
-
- /* Note that the argument is constrained to match or_mask_p. */
- for (bs0 = 0; bs0 < 32; bs0++) {
- if ((m & (1u << bs0)) != 0) {
- break;
- }
- }
- for (bs1 = bs0; bs1 < 32; bs1++) {
- if ((m & (1u << bs1)) == 0) {
- break;
- }
- }
- assert(bs1 == 32 || (1ul << bs1) > m);
-
- tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
- tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
-}
-
-static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
-{
- int ls0, ls1, ms0;
-
- /* Note that the argument is constrained to match and_mask_p. */
- for (ls0 = 0; ls0 < 32; ls0++) {
- if ((m & (1u << ls0)) == 0) {
- break;
- }
- }
- for (ls1 = ls0; ls1 < 32; ls1++) {
- if ((m & (1u << ls1)) != 0) {
- break;
- }
- }
- for (ms0 = ls1; ms0 < 32; ms0++) {
- if ((m & (1u << ms0)) == 0) {
- break;
- }
- }
- assert (ms0 == 32);
-
- if (ls1 == 32) {
- tcg_out_extr(s, ret, arg, 0, ls0, 0);
- } else {
- tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
- tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
- }
-}
-
-static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
-{
- tcg_out_extr(s, ret, arg, 0, 8, 1);
-}
-
-static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
-{
- tcg_out_extr(s, ret, arg, 0, 16, 1);
-}
-
-static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
-{
- count &= 31;
- tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
- | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
-}
-
-static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
-{
- tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
- tcg_out_mtctl_sar(s, TCG_REG_R20);
- tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
-}
-
-static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
-{
- count &= 31;
- tcg_out_extr(s, ret, arg, count, 32 - count, 0);
-}
-
-static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
-{
- tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
-}
-
-static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
-{
- count &= 31;
- tcg_out_extr(s, ret, arg, count, 32 - count, 1);
-}
-
-static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
-{
- tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
- tcg_out_mtctl_sar(s, TCG_REG_R20);
- tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
-}
-
-static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
-{
- count &= 31;
- tcg_out_shd(s, ret, arg, arg, 32 - count);
-}
-
-static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
-{
- tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
- tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
-}
-
-static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
-{
- count &= 31;
- tcg_out_shd(s, ret, arg, arg, count);
-}
-
-static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
-{
- tcg_out_vshd(s, ret, arg, arg, creg);
-}
-
-static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
-{
- if (ret != arg) {
- tcg_out_mov(s, TCG_TYPE_I32, ret, arg); /* arg = xxAB */
- }
- tcg_out_dep(s, ret, ret, 16, 8); /* ret = xBAB */
- tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret = ..BA */
-}
-
-static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
-{
- /* arg = ABCD */
- tcg_out_rotri(s, temp, arg, 16); /* temp = CDAB */
- tcg_out_dep(s, temp, temp, 16, 8); /* temp = CBAB */
- tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
-}
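
A plain-C model of the three-instruction shuffle above may help in reading the encodings; this is a sketch for illustration, not backend code (checked against x = 0xAABBCCDD):

#include <stdint.h>

static uint32_t model_bswap32(uint32_t x)            /* x = ABCD */
{
    uint32_t t = (x >> 16) | (x << 16);              /* t = CDAB (rotri 16) */
    t = (t & 0xff00ffffu) | ((t & 0xffu) << 16);     /* t = CBAB (dep 16,8) */
    return (x << 24) | (t >> 8);                     /* ret = DCBA (shd 8)  */
}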
-
-static void tcg_out_call(TCGContext *s, const void *func)
-{
- tcg_target_long val, hi, lo, disp;
-
- val = (uint32_t)__canonicalize_funcptr_for_compare(func);
- disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;
-
- if (check_fit_tl(disp, 17)) {
- tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
- } else {
- hi = val >> 11;
- lo = val & 0x7ff;
-
- tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
- tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
- | reassemble_17(lo >> 2));
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
- }
-}
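
The branch above is emitted directly when the word displacement fits a signed 17-bit field, else a two-instruction long call is built. A sketch of the usual check_fit_tl() semantics used for that test (the helper name here is mine):

#include <stdint.h>

/* Does VAL fit in a signed BITS-wide field?  bits=17: [-0x10000, 0xffff]. */
static int fits_signed(intptr_t val, int bits)
{
    intptr_t lim = (intptr_t)1 << (bits - 1);
    return val >= -lim && val < lim;
}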
-
-static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
- int arg1, int arg2)
-{
- /* Store both words into the stack for copy to the FPU. */
- tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
- tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);
-
- /* Load both words into the FPU at the same time. We get away
- with this because we can address the left and right half of the
- FPU registers individually once loaded. */
- /* fldds stack_temp(sp),fr22 */
- tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
- | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
-
- /* xmpyu fr22r,fr22,fr22 */
- tcg_out32(s, 0x3ad64796);
-
- /* Store the 64-bit result back into the stack. */
- /* fstds stack_temp(sp),fr22 */
- tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
- | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
-
- /* Load the pieces of the result that the caller requested. */
- if (reth) {
- tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
- }
- if (retl) {
- tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
- INSN_LDW);
- }
-}
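
What the stack/FPU round-trip above computes, modeled in plain C for illustration only: a 32x32 -> 64-bit unsigned multiply whose two halves are read back as separate words.

#include <stdint.h>

static void model_xmpyu(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
{
    uint64_t p = (uint64_t)a * b;
    *hi = (uint32_t)(p >> 32);
    *lo = (uint32_t)p;
}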
-
-static void tcg_out_add2(TCGContext *s, int destl, int desth,
- int al, int ah, int bl, int bh, int blconst)
-{
- int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
-
- if (blconst) {
- tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
- } else {
- tcg_out_arith(s, tmp, al, bl, INSN_ADD);
- }
- tcg_out_arith(s, desth, ah, bh, INSN_ADDC);
-
- tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
-}
-
-static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
- int bl, int bh, int alconst, int blconst)
-{
- int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
-
- if (alconst) {
- if (blconst) {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
- bl = TCG_REG_R20;
- }
- tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
- } else if (blconst) {
- tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
- } else {
- tcg_out_arith(s, tmp, al, bl, INSN_SUB);
- }
- tcg_out_arith(s, desth, ah, bh, INSN_SUBB);
-
- tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
-}
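
Plain-C model of the double-word arithmetic lowered by the two helpers above (illustration only): the low-word ADD/SUB produces a carry/borrow that the high-word ADDC/SUBB consumes.

#include <stdint.h>

static void model_add2(uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh,
                       uint32_t *dl, uint32_t *dh)
{
    uint32_t lo = al + bl;
    *dh = ah + bh + (lo < al);   /* carry out of the low addition */
    *dl = lo;
}

static void model_sub2(uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh,
                       uint32_t *dl, uint32_t *dh)
{
    *dh = ah - bh - (al < bl);   /* borrow out of the low subtraction */
    *dl = al - bl;
}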
-
-static void tcg_out_branch(TCGContext *s, int label_index, int nul)
-{
- TCGLabel *l = &s->labels[label_index];
- uint32_t op = nul ? INSN_BL_N : INSN_BL;
-
- if (l->has_value) {
- tcg_target_long val = l->u.value;
-
- val -= (tcg_target_long)s->code_ptr + 8;
- val >>= 2;
- assert(check_fit_tl(val, 17));
-
- tcg_out32(s, op | reassemble_17(val));
- } else {
- /* We need to keep the offset unchanged for retranslation. */
- uint32_t old_insn = *(uint32_t *)s->code_ptr;
-
- tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
- tcg_out32(s, op | (old_insn & 0x1f1ffdu));
- }
-}
-
-static const uint8_t tcg_cond_to_cmp_cond[] =
-{
- [TCG_COND_EQ] = COND_EQ,
- [TCG_COND_NE] = COND_EQ | COND_FALSE,
- [TCG_COND_LT] = COND_LT,
- [TCG_COND_GE] = COND_LT | COND_FALSE,
- [TCG_COND_LE] = COND_LE,
- [TCG_COND_GT] = COND_LE | COND_FALSE,
- [TCG_COND_LTU] = COND_LTU,
- [TCG_COND_GEU] = COND_LTU | COND_FALSE,
- [TCG_COND_LEU] = COND_LEU,
- [TCG_COND_GTU] = COND_LEU | COND_FALSE,
-};
-
-static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
- TCGArg c2, int c2const, int label_index)
-{
- TCGLabel *l = &s->labels[label_index];
- int op, pacond;
-
- /* Note that COMIB operates as if the immediate is the first
- operand. We model brcond with the immediate in the second
- to better match what targets are likely to give us. For
- consistency, model COMB with reversed operands as well. */
- pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
-
- if (c2const) {
- op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
- op |= INSN_IM5(c2);
- } else {
- op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
- op |= INSN_R1(c2);
- }
- op |= INSN_R2(c1);
- op |= INSN_COND(pacond & 7);
-
- if (l->has_value) {
- tcg_target_long val = l->u.value;
-
- val -= (tcg_target_long)s->code_ptr + 8;
- val >>= 2;
- assert(check_fit_tl(val, 12));
-
- /* ??? Assume that all branches to defined labels are backward.
- Which means that if the nul bit is set, the delay slot is
- executed if the branch is taken, and not executed in fallthru. */
- tcg_out32(s, op | reassemble_12(val));
- tcg_out_nop(s);
- } else {
- /* We need to keep the offset unchanged for retranslation. */
- uint32_t old_insn = *(uint32_t *)s->code_ptr;
-
- tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
- /* ??? Assume that all branches to undefined labels are forward.
- Which means that if the nul bit is set, the delay slot is
- not executed if the branch is taken, which is what we want. */
- tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
- }
-}
-
-static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
- TCGArg c1, TCGArg c2, int c2const)
-{
- int op, pacond;
-
- /* Note that COMICLR operates as if the immediate is the first
- operand. We model setcond with the immediate in the second
- to better match what targets are likely to give us. For
- consistency, model COMCLR with reversed operands as well. */
- pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
-
- if (c2const) {
- op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
- } else {
- op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
- }
- op |= INSN_COND(pacond & 7);
- op |= pacond & COND_FALSE ? 1 << 12 : 0;
-
- tcg_out32(s, op);
-}
-
-static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
- TCGArg bl, int blconst, TCGArg bh, int bhconst,
- int label_index)
-{
- switch (cond) {
- case TCG_COND_EQ:
- tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, al, bl, blconst);
- tcg_out_brcond(s, TCG_COND_EQ, ah, bh, bhconst, label_index);
- break;
- case TCG_COND_NE:
- tcg_out_brcond(s, TCG_COND_NE, al, bl, blconst, label_index);
- tcg_out_brcond(s, TCG_COND_NE, ah, bh, bhconst, label_index);
- break;
- default:
- tcg_out_brcond(s, tcg_high_cond(cond), ah, bh, bhconst, label_index);
- tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
- tcg_out_brcond(s, tcg_unsigned_cond(cond),
- al, bl, blconst, label_index);
- break;
- }
-}
-
-static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
- TCGArg c1, TCGArg c2, int c2const)
-{
- tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
- tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
-}
-
-static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
- TCGArg al, TCGArg ah, TCGArg bl, int blconst,
- TCGArg bh, int bhconst)
-{
- int scratch = TCG_REG_R20;
-
- /* Note that the low parts are fully consumed before scratch is set. */
- if (ret != ah && (bhconst || ret != bh)) {
- scratch = ret;
- }
-
- switch (cond) {
- case TCG_COND_EQ:
- case TCG_COND_NE:
- tcg_out_setcond(s, cond, scratch, al, bl, blconst);
- tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
- tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
- break;
-
- case TCG_COND_GE:
- case TCG_COND_GEU:
- case TCG_COND_LT:
- case TCG_COND_LTU:
- /* Optimize compares with low part zero. */
- if (bl == 0) {
- tcg_out_setcond(s, cond, ret, ah, bh, bhconst);
- return;
- }
- /* FALLTHRU */
-
- case TCG_COND_LE:
- case TCG_COND_LEU:
- case TCG_COND_GT:
- case TCG_COND_GTU:
- /* <= : ah < bh | (ah == bh && al <= bl) */
- tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
- tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
- tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
- tcg_out_comclr(s, tcg_invert_cond(tcg_high_cond(cond)),
- TCG_REG_R0, ah, bh, bhconst);
- tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
- break;
-
- default:
- tcg_abort();
- }
-
- tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
-}
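
The identity from the "<=" comment above, written out as plain C for the LEU case (illustration only; the other ordered cases follow the same shape with their unsigned/high-part variants):

#include <stdint.h>

static int model_setcond2_leu(uint32_t al, uint32_t ah,
                              uint32_t bl, uint32_t bh)
{
    return ah < bh || (ah == bh && al <= bl);
}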
-
-static void tcg_out_movcond(TCGContext *s, int cond, TCGArg ret,
- TCGArg c1, TCGArg c2, int c2const,
- TCGArg v1, int v1const)
-{
- tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, c1, c2, c2const);
- if (v1const) {
- tcg_out_movi(s, TCG_TYPE_I32, ret, v1);
- } else {
- tcg_out_mov(s, TCG_TYPE_I32, ret, v1);
- }
-}
-
-#if defined(CONFIG_SOFTMMU)
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- int mmu_idx) */
-static const void * const qemu_ld_helpers[4] = {
- helper_ldb_mmu,
- helper_ldw_mmu,
- helper_ldl_mmu,
- helper_ldq_mmu,
-};
-
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- uintxx_t val, int mmu_idx) */
-static const void * const qemu_st_helpers[4] = {
- helper_stb_mmu,
- helper_stw_mmu,
- helper_stl_mmu,
- helper_stq_mmu,
-};
-
-/* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
- the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
- TLB for the memory index. The return value is the offset from ENV
- contained in R1 afterward (to be used when loading ADDEND); if the
- return value is 0, R1 is not used. */
-
-static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
- int addrhi, int s_bits, int lab_miss, int offset)
-{
- int ret;
-
- /* Extracting the index into the TLB. The "normal C operation" is
- r1 = addr_reg >> TARGET_PAGE_BITS;
- r1 &= CPU_TLB_SIZE - 1;
- r1 <<= CPU_TLB_ENTRY_BITS;
- What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
- and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
- operations with an EXTRU. Unfortunately, the current value of
- CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
- add that follows. */
- tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
- tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
- tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
-
- /* Make sure that both the addr_{read,write} and addend can be
- read with a 14-bit offset from the same base register. */
- if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
- ret = 0;
- } else {
- ret = (offset + 0x400) & ~0x7ff;
- offset = ret - offset;
- tcg_out_addi2(s, TCG_REG_R1, r1, ret);
- r1 = TCG_REG_R1;
- }
-
- /* Load the entry from the computed slot. */
- if (TARGET_LONG_BITS == 64) {
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
- } else {
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
- }
-
- /* Compute the value that ought to appear in the TLB for a hit, namely,
- the page of the address. We include the low N bits of the address
- to catch unaligned accesses and force them onto the slow path. Do
- this computation after having issued the load from the TLB slot to
- give the load time to complete. */
- tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
-
- /* If not equal, jump to lab_miss. */
- if (TARGET_LONG_BITS == 64) {
- tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
- r0, 0, addrhi, 0, lab_miss);
- } else {
- tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
- }
-
- return ret;
-}
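
The "normal C operation" named in the comment above, spelled out as a sketch; it leans on QEMU's own target_ulong, TARGET_PAGE_BITS, CPU_TLB_SIZE and CPU_TLB_ENTRY_BITS, and the helper name is mine:

static uintptr_t model_tlb_slot_offset(target_ulong addr)
{
    uintptr_t idx = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    return idx << CPU_TLB_ENTRY_BITS;  /* byte offset into tlb_table[mem_index] */
}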
-
-static int tcg_out_arg_reg32(TCGContext *s, int argno, TCGArg v, bool vconst)
-{
- if (argno < 4) {
- if (vconst) {
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
- } else {
- tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
- }
- } else {
- if (vconst && v != 0) {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, v);
- v = TCG_REG_R20;
- }
- tcg_out_st(s, TCG_TYPE_I32, v, TCG_REG_CALL_STACK,
- TCG_TARGET_CALL_STACK_OFFSET - ((argno - 3) * 4));
- }
- return argno + 1;
-}
-
-static int tcg_out_arg_reg64(TCGContext *s, int argno, TCGArg vl, TCGArg vh)
-{
- /* 64-bit arguments must go in even reg pairs and stack slots. */
- if (argno & 1) {
- argno++;
- }
- argno = tcg_out_arg_reg32(s, argno, vl, false);
- argno = tcg_out_arg_reg32(s, argno, vh, false);
- return argno;
-}
-#endif
-
-static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
- int addr_reg, int addend_reg, int opc)
-{
-#ifdef TARGET_WORDS_BIGENDIAN
- const int bswap = 0;
-#else
- const int bswap = 1;
-#endif
-
- switch (opc) {
- case 0:
- tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
- break;
- case 0 | 4:
- tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
- tcg_out_ext8s(s, datalo_reg, datalo_reg);
- break;
- case 1:
- tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
- if (bswap) {
- tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
- }
- break;
- case 1 | 4:
- tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
- if (bswap) {
- tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
- } else {
- tcg_out_ext16s(s, datalo_reg, datalo_reg);
- }
- break;
- case 2:
- tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
- if (bswap) {
- tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
- }
- break;
- case 3:
- if (bswap) {
- int t = datahi_reg;
- datahi_reg = datalo_reg;
- datalo_reg = t;
- }
- /* We can't access the low-part with a reg+reg addressing mode,
- so perform the addition now and use reg_ofs addressing mode. */
- if (addend_reg != TCG_REG_R0) {
- tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
- addr_reg = TCG_REG_R20;
- }
- /* Make sure not to clobber the base register. */
- if (datahi_reg == addr_reg) {
- tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
- tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
- } else {
- tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
- tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
- }
- if (bswap) {
- tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
- tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
- }
- break;
- default:
- tcg_abort();
- }
-}
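
The opc encoding used throughout this file's qemu_ld/st lowering, decoded as a sketch (the helper name is mine):

static void decode_ldst_opc(int opc, int *size_log2, int *sign_extend)
{
    *size_log2   = opc & 3;        /* 1, 2, 4 or 8 byte access */
    *sign_extend = (opc & 4) != 0; /* the "| 4" load cases above */
}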
-
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
-{
- int datalo_reg = *args++;
- /* Note that datahi_reg is only used for 64-bit loads. */
- int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
- int addrlo_reg = *args++;
-
-#if defined(CONFIG_SOFTMMU)
- /* Note that addrhi_reg is only used for 64-bit guests. */
- int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
- int mem_index = *args;
- int lab1, lab2, argno, offset;
-
- lab1 = gen_new_label();
- lab2 = gen_new_label();
-
- offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
- offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
- addrhi_reg, opc & 3, lab1, offset);
-
- /* TLB Hit. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
- (offset ? TCG_REG_R1 : TCG_REG_R25),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
- tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
- TCG_REG_R20, opc);
- tcg_out_branch(s, lab2, 1);
-
- /* TLB Miss. */
- /* label1: */
- tcg_out_label(s, lab1, s->code_ptr);
-
- argno = 0;
- argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
- if (TARGET_LONG_BITS == 64) {
- argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
- } else {
- argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
- }
- argno = tcg_out_arg_reg32(s, argno, mem_index, true);
-
- tcg_out_call(s, qemu_ld_helpers[opc & 3]);
-
- switch (opc) {
- case 0:
- tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
- break;
- case 0 | 4:
- tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
- break;
- case 1:
- tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
- break;
- case 1 | 4:
- tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
- break;
- case 2:
- case 2 | 4:
- tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
- break;
- case 3:
- tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
- tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
- break;
- default:
- tcg_abort();
- }
-
- /* label2: */
- tcg_out_label(s, lab2, s->code_ptr);
-#else
- tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
- (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
-#endif
-}
-
-static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
- int datahi_reg, int addr_reg, int opc)
-{
-#ifdef TARGET_WORDS_BIGENDIAN
- const int bswap = 0;
-#else
- const int bswap = 1;
-#endif
-
- switch (opc) {
- case 0:
- tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
- break;
- case 1:
- if (bswap) {
- tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
- datalo_reg = TCG_REG_R20;
- }
- tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
- break;
- case 2:
- if (bswap) {
- tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
- datalo_reg = TCG_REG_R20;
- }
- tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
- break;
- case 3:
- if (bswap) {
- tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
- tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
- datahi_reg = TCG_REG_R20;
- datalo_reg = TCG_REG_R23;
- }
- tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
- tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
- break;
- default:
- tcg_abort();
- }
-}
-
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
-{
- int datalo_reg = *args++;
-    /* Note that datahi_reg is only used for 64-bit stores. */
- int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
- int addrlo_reg = *args++;
-
-#if defined(CONFIG_SOFTMMU)
- /* Note that addrhi_reg is only used for 64-bit guests. */
- int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
- int mem_index = *args;
- int lab1, lab2, argno, next, offset;
-
- lab1 = gen_new_label();
- lab2 = gen_new_label();
-
- offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
- offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
- addrhi_reg, opc, lab1, offset);
-
- /* TLB Hit. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
- (offset ? TCG_REG_R1 : TCG_REG_R25),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
-
-    /* There are no indexed stores, so we must do this addition explicitly.
- Careful to avoid R20, which is used for the bswaps to follow. */
- tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
- tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
- tcg_out_branch(s, lab2, 1);
-
- /* TLB Miss. */
- /* label1: */
- tcg_out_label(s, lab1, s->code_ptr);
-
- argno = 0;
- argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
- if (TARGET_LONG_BITS == 64) {
- argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
- } else {
- argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
- }
-
- next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
-    switch (opc) {
- case 0:
- tcg_out_andi(s, next, datalo_reg, 0xff);
- argno = tcg_out_arg_reg32(s, argno, next, false);
- break;
- case 1:
- tcg_out_andi(s, next, datalo_reg, 0xffff);
- argno = tcg_out_arg_reg32(s, argno, next, false);
- break;
- case 2:
- argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
- break;
- case 3:
- argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
- break;
- default:
- tcg_abort();
- }
- argno = tcg_out_arg_reg32(s, argno, mem_index, true);
-
- tcg_out_call(s, qemu_st_helpers[opc]);
-
- /* label2: */
- tcg_out_label(s, lab2, s->code_ptr);
-#else
- /* There are no indexed stores, so if GUEST_BASE is set we must do
- the add explicitly. Careful to avoid R20, which is used for the
- bswaps to follow. */
- if (GUEST_BASE != 0) {
- tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
- TCG_GUEST_BASE_REG, INSN_ADDL);
- addrlo_reg = TCG_REG_R31;
- }
- tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
-#endif
-}
-
-static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
-{
- if (!check_fit_tl(arg, 14)) {
- uint32_t hi, lo;
- hi = arg & ~0x7ff;
- lo = arg & 0x7ff;
- if (lo) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
- tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
- tcg_out_addi(s, TCG_REG_RET0, lo);
- return;
- }
- arg = hi;
- }
- tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
-}
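
The split above mirrors the 21/11 immediate split used in tcg_out_call: the high part has its low 11 bits clear, so a single LDIL-style movi can build it, while the low part is added back in the branch delay slot. A plain-C sketch of the decomposition (the helper name is mine):

#include <stdint.h>

/* HI + LO == ARG, since the two parts occupy disjoint bits. */
static void model_split_2k(uintptr_t arg, uintptr_t *hi, uintptr_t *lo)
{
    *hi = arg & ~(uintptr_t)0x7ff;
    *lo = arg & 0x7ff;
}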
-
-static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
-{
- if (s->tb_jmp_offset) {
- /* direct jump method */
- fprintf(stderr, "goto_tb direct\n");
- tcg_abort();
- } else {
- /* indirect jump method */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
- (tcg_target_long)(s->tb_next + arg));
- tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
- }
- s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
-}
-
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
- const int *const_args)
-{
- switch (opc) {
- case INDEX_op_exit_tb:
- tcg_out_exit_tb(s, args[0]);
- break;
- case INDEX_op_goto_tb:
- tcg_out_goto_tb(s, args[0]);
- break;
-
- case INDEX_op_call:
- if (const_args[0]) {
- tcg_out_call(s, (void *)args[0]);
- } else {
- /* ??? FIXME: the value in the register in args[0] is almost
- certainly a procedure descriptor, not a code address. We
- probably need to use the millicode $$dyncall routine. */
- tcg_abort();
- }
- break;
-
- case INDEX_op_br:
- tcg_out_branch(s, args[0], 1);
- break;
-
- case INDEX_op_movi_i32:
- tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
- break;
-
- case INDEX_op_ld8u_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
- break;
- case INDEX_op_ld8s_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
- tcg_out_ext8s(s, args[0], args[0]);
- break;
- case INDEX_op_ld16u_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
- break;
- case INDEX_op_ld16s_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
- tcg_out_ext16s(s, args[0], args[0]);
- break;
- case INDEX_op_ld_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
- break;
-
- case INDEX_op_st8_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
- break;
- case INDEX_op_st16_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
- break;
- case INDEX_op_st_i32:
- tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
- break;
-
- case INDEX_op_add_i32:
- if (const_args[2]) {
- tcg_out_addi2(s, args[0], args[1], args[2]);
- } else {
- tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
- }
- break;
-
- case INDEX_op_sub_i32:
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
- } else {
- /* Recall that SUBI is a reversed subtract. */
- tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
- }
- } else if (const_args[2]) {
- tcg_out_addi2(s, args[0], args[1], -args[2]);
- } else {
- tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
- }
- break;
-
- case INDEX_op_and_i32:
- if (const_args[2]) {
- tcg_out_andi(s, args[0], args[1], args[2]);
- } else {
- tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
- }
- break;
-
- case INDEX_op_or_i32:
- if (const_args[2]) {
- tcg_out_ori(s, args[0], args[1], args[2]);
- } else {
- tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
- }
- break;
-
- case INDEX_op_xor_i32:
- tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
- break;
-
- case INDEX_op_andc_i32:
- if (const_args[2]) {
- tcg_out_andi(s, args[0], args[1], ~args[2]);
- } else {
- tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
- }
- break;
-
- case INDEX_op_shl_i32:
- if (const_args[2]) {
- tcg_out_shli(s, args[0], args[1], args[2]);
- } else {
- tcg_out_shl(s, args[0], args[1], args[2]);
- }
- break;
-
- case INDEX_op_shr_i32:
- if (const_args[2]) {
- tcg_out_shri(s, args[0], args[1], args[2]);
- } else {
- tcg_out_shr(s, args[0], args[1], args[2]);
- }
- break;
-
- case INDEX_op_sar_i32:
- if (const_args[2]) {
- tcg_out_sari(s, args[0], args[1], args[2]);
- } else {
- tcg_out_sar(s, args[0], args[1], args[2]);
- }
- break;
-
- case INDEX_op_rotl_i32:
- if (const_args[2]) {
- tcg_out_rotli(s, args[0], args[1], args[2]);
- } else {
- tcg_out_rotl(s, args[0], args[1], args[2]);
- }
- break;
-
- case INDEX_op_rotr_i32:
- if (const_args[2]) {
- tcg_out_rotri(s, args[0], args[1], args[2]);
- } else {
- tcg_out_rotr(s, args[0], args[1], args[2]);
- }
- break;
-
- case INDEX_op_mul_i32:
- tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
- break;
- case INDEX_op_mulu2_i32:
- tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
- break;
-
- case INDEX_op_bswap16_i32:
- tcg_out_bswap16(s, args[0], args[1], 0);
- break;
- case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
- break;
-
- case INDEX_op_not_i32:
- tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
- break;
- case INDEX_op_ext8s_i32:
- tcg_out_ext8s(s, args[0], args[1]);
- break;
- case INDEX_op_ext16s_i32:
- tcg_out_ext16s(s, args[0], args[1]);
- break;
-
- case INDEX_op_brcond_i32:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
- break;
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args[4], args[0], args[1],
- args[2], const_args[2],
- args[3], const_args[3], args[5]);
- break;
-
- case INDEX_op_setcond_i32:
- tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
- args[3], const_args[3], args[4], const_args[4]);
- break;
-
- case INDEX_op_movcond_i32:
- tcg_out_movcond(s, args[5], args[0], args[1], args[2], const_args[2],
- args[3], const_args[3]);
- break;
-
- case INDEX_op_add2_i32:
- tcg_out_add2(s, args[0], args[1], args[2], args[3],
- args[4], args[5], const_args[4]);
- break;
-
- case INDEX_op_sub2_i32:
- tcg_out_sub2(s, args[0], args[1], args[2], args[3],
- args[4], args[5], const_args[2], const_args[4]);
- break;
-
- case INDEX_op_deposit_i32:
- if (const_args[2]) {
- tcg_out_depi(s, args[0], args[2], args[3], args[4]);
- } else {
- tcg_out_dep(s, args[0], args[2], args[3], args[4]);
- }
- break;
-
- case INDEX_op_qemu_ld8u:
- tcg_out_qemu_ld(s, args, 0);
- break;
- case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, 0 | 4);
- break;
- case INDEX_op_qemu_ld16u:
- tcg_out_qemu_ld(s, args, 1);
- break;
- case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, 1 | 4);
- break;
- case INDEX_op_qemu_ld32:
- tcg_out_qemu_ld(s, args, 2);
- break;
- case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, 3);
- break;
-
- case INDEX_op_qemu_st8:
- tcg_out_qemu_st(s, args, 0);
- break;
- case INDEX_op_qemu_st16:
- tcg_out_qemu_st(s, args, 1);
- break;
- case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, 2);
- break;
- case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, 3);
- break;
-
- default:
- fprintf(stderr, "unknown opcode 0x%x\n", opc);
- tcg_abort();
- }
-}
-
-static const TCGTargetOpDef hppa_op_defs[] = {
- { INDEX_op_exit_tb, { } },
- { INDEX_op_goto_tb, { } },
-
- { INDEX_op_call, { "ri" } },
- { INDEX_op_br, { } },
-
- { INDEX_op_mov_i32, { "r", "r" } },
- { INDEX_op_movi_i32, { "r" } },
-
- { INDEX_op_ld8u_i32, { "r", "r" } },
- { INDEX_op_ld8s_i32, { "r", "r" } },
- { INDEX_op_ld16u_i32, { "r", "r" } },
- { INDEX_op_ld16s_i32, { "r", "r" } },
- { INDEX_op_ld_i32, { "r", "r" } },
- { INDEX_op_st8_i32, { "rZ", "r" } },
- { INDEX_op_st16_i32, { "rZ", "r" } },
- { INDEX_op_st_i32, { "rZ", "r" } },
-
- { INDEX_op_add_i32, { "r", "rZ", "ri" } },
- { INDEX_op_sub_i32, { "r", "rI", "ri" } },
- { INDEX_op_and_i32, { "r", "rZ", "rM" } },
- { INDEX_op_or_i32, { "r", "rZ", "rO" } },
- { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
-    /* Note that the second argument will be inverted, so we want a
-       constant whose inversion matches M; O is exactly ~M.  See the
-       implementation of and_mask_p.  */
- { INDEX_op_andc_i32, { "r", "rZ", "rO" } },
-
- { INDEX_op_mul_i32, { "r", "r", "r" } },
- { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
-
- { INDEX_op_shl_i32, { "r", "r", "ri" } },
- { INDEX_op_shr_i32, { "r", "r", "ri" } },
- { INDEX_op_sar_i32, { "r", "r", "ri" } },
- { INDEX_op_rotl_i32, { "r", "r", "ri" } },
- { INDEX_op_rotr_i32, { "r", "r", "ri" } },
-
- { INDEX_op_bswap16_i32, { "r", "r" } },
- { INDEX_op_bswap32_i32, { "r", "r" } },
- { INDEX_op_not_i32, { "r", "r" } },
-
- { INDEX_op_ext8s_i32, { "r", "r" } },
- { INDEX_op_ext16s_i32, { "r", "r" } },
-
- { INDEX_op_brcond_i32, { "rZ", "rJ" } },
- { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
-
- { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
- { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },
-
- /* ??? We can actually support a signed 14-bit arg3, but we
- only have existing constraints for a signed 11-bit. */
- { INDEX_op_movcond_i32, { "r", "rZ", "rI", "rI", "0" } },
-
- { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
- { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },
-
- { INDEX_op_deposit_i32, { "r", "0", "rJ" } },
-
-#if TARGET_LONG_BITS == 32
- { INDEX_op_qemu_ld8u, { "r", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L" } },
- { INDEX_op_qemu_ld64, { "r", "r", "L" } },
-
- { INDEX_op_qemu_st8, { "LZ", "L" } },
- { INDEX_op_qemu_st16, { "LZ", "L" } },
- { INDEX_op_qemu_st32, { "LZ", "L" } },
- { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
-#else
- { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L", "L" } },
- { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
-
- { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
- { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
- { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
- { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
-#endif
- { -1 },
-};
-
-static int tcg_target_callee_save_regs[] = {
- /* R2, the return address register, is saved specially
- in the caller's frame. */
- /* R3, the frame pointer, is not currently modified. */
- TCG_REG_R4,
- TCG_REG_R5,
- TCG_REG_R6,
- TCG_REG_R7,
- TCG_REG_R8,
- TCG_REG_R9,
- TCG_REG_R10,
- TCG_REG_R11,
- TCG_REG_R12,
- TCG_REG_R13,
- TCG_REG_R14,
- TCG_REG_R15,
- TCG_REG_R16,
- TCG_REG_R17, /* R17 is the global env. */
- TCG_REG_R18
-};
-
-#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
- + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
- + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
- + CPU_TEMP_BUF_NLONGS * sizeof(long) \
- + TCG_TARGET_STACK_ALIGN - 1) \
- & -TCG_TARGET_STACK_ALIGN)
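
The "+ TCG_TARGET_STACK_ALIGN - 1 ... & -TCG_TARGET_STACK_ALIGN" tail of the macro above is the standard round-up-to-alignment idiom; generically (sketch):

/* Round N up to the next multiple of A; A must be a power of two. */
#define ROUND_UP(n, a)  (((n) + (a) - 1) & -(a))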
-
-static void tcg_target_qemu_prologue(TCGContext *s)
-{
- int frame_size, i;
-
- frame_size = FRAME_SIZE;
-
- /* The return address is stored in the caller's frame. */
- tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);
-
- /* Allocate stack frame, saving the first register at the same time. */
- tcg_out_ldst(s, tcg_target_callee_save_regs[0],
- TCG_REG_CALL_STACK, frame_size, INSN_STWM);
-
- /* Save all callee saved registers. */
- for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
- tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
- TCG_REG_CALL_STACK, -frame_size + i * 4);
- }
-
- /* Record the location of the TCG temps. */
- tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
- CPU_TEMP_BUF_NLONGS * sizeof(long));
-
-#ifdef CONFIG_USE_GUEST_BASE
- if (GUEST_BASE != 0) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
- tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
- }
-#endif
-
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
-
- /* Jump to TB, and adjust R18 to be the return address. */
- tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
- tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);
-
- /* Restore callee saved registers. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
- -frame_size - 20);
- for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
- tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
- TCG_REG_CALL_STACK, -frame_size + i * 4);
- }
-
- /* Deallocate stack frame and return. */
- tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
- tcg_out_ldst(s, tcg_target_callee_save_regs[0],
- TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
-}
-
-static void tcg_target_init(TCGContext *s)
-{
- tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
-
- tcg_regset_clear(tcg_target_call_clobber_regs);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);
-
- tcg_regset_clear(s->reserved_regs);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
-
- tcg_add_target_add_op_defs(hppa_op_defs);
-}
-
-typedef struct {
- DebugFrameCIE cie;
- DebugFrameFDEHeader fde;
- uint8_t fde_def_cfa[4];
- uint8_t fde_ret_ofs[3];
- uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
-} DebugFrame;
-
-#define ELF_HOST_MACHINE EM_PARISC
-#define ELF_HOST_FLAGS EFA_PARISC_1_1
-
-/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
- and other extensions. We don't really care, but if we don't set this
- to *something* then the object file won't be properly matched. */
-#define ELF_OSABI ELFOSABI_LINUX
-
-static DebugFrame debug_frame = {
- .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
- .cie.id = -1,
- .cie.version = 1,
- .cie.code_align = 1,
- .cie.data_align = 1,
- .cie.return_column = 2,
-
- /* Total FDE size does not include the "len" member. */
- .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
-
- .fde_def_cfa = {
- 0x12, 30, /* DW_CFA_def_cfa_sf sp, ... */
- (-FRAME_SIZE & 0x7f) | 0x80, /* ... sleb128 -FRAME_SIZE */
- (-FRAME_SIZE >> 7) & 0x7f
- },
- .fde_ret_ofs = {
- 0x11, 2, (-20 / 4) & 0x7f /* DW_CFA_offset_extended_sf r2, 20 */
- },
- .fde_reg_ofs = {
- /* This must match the ordering in tcg_target_callee_save_regs. */
- 0x80 + 4, 0, /* DW_CFA_offset r4, 0 */
- 0x80 + 5, 4, /* DW_CFA_offset r5, 4 */
- 0x80 + 6, 8, /* DW_CFA_offset r6, 8 */
- 0x80 + 7, 12, /* ... */
- 0x80 + 8, 16,
- 0x80 + 9, 20,
- 0x80 + 10, 24,
- 0x80 + 11, 28,
- 0x80 + 12, 32,
- 0x80 + 13, 36,
- 0x80 + 14, 40,
- 0x80 + 15, 44,
- 0x80 + 16, 48,
- 0x80 + 17, 52,
- 0x80 + 18, 56,
- }
-};
-
-void tcg_register_jit(void *buf, size_t buf_size)
-{
- debug_frame.fde.func_start = (tcg_target_long) buf;
- debug_frame.fde.func_len = buf_size;
-
- tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
-}
diff --git a/tcg/hppa/tcg-target.h b/tcg/hppa/tcg-target.h
deleted file mode 100644
index 122edce7a7..0000000000
--- a/tcg/hppa/tcg-target.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Tiny Code Generator for QEMU
- *
- * Copyright (c) 2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef TCG_TARGET_HPPA
-#define TCG_TARGET_HPPA 1
-
-#define TCG_TARGET_WORDS_BIGENDIAN
-
-#define TCG_TARGET_NB_REGS 32
-
-typedef enum {
- TCG_REG_R0 = 0,
- TCG_REG_R1,
- TCG_REG_RP,
- TCG_REG_R3,
- TCG_REG_R4,
- TCG_REG_R5,
- TCG_REG_R6,
- TCG_REG_R7,
- TCG_REG_R8,
- TCG_REG_R9,
- TCG_REG_R10,
- TCG_REG_R11,
- TCG_REG_R12,
- TCG_REG_R13,
- TCG_REG_R14,
- TCG_REG_R15,
- TCG_REG_R16,
- TCG_REG_R17,
- TCG_REG_R18,
- TCG_REG_R19,
- TCG_REG_R20,
- TCG_REG_R21,
- TCG_REG_R22,
- TCG_REG_R23,
- TCG_REG_R24,
- TCG_REG_R25,
- TCG_REG_R26,
- TCG_REG_DP,
- TCG_REG_RET0,
- TCG_REG_RET1,
- TCG_REG_SP,
- TCG_REG_R31,
-} TCGReg;
-
-#define TCG_CT_CONST_0 0x0100
-#define TCG_CT_CONST_S5 0x0200
-#define TCG_CT_CONST_S11 0x0400
-#define TCG_CT_CONST_MS11 0x0800
-#define TCG_CT_CONST_AND 0x1000
-#define TCG_CT_CONST_OR 0x2000
-
-/* used for function call generation */
-#define TCG_REG_CALL_STACK TCG_REG_SP
-#define TCG_TARGET_STACK_ALIGN 64
-#define TCG_TARGET_CALL_STACK_OFFSET -48
-#define TCG_TARGET_STATIC_CALL_ARGS_SIZE 8*4
-#define TCG_TARGET_CALL_ALIGN_ARGS 1
-#define TCG_TARGET_STACK_GROWSUP
-
-/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 0
-#define TCG_TARGET_HAS_rem_i32 0
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_orc_i32 0
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_movcond_i32 1
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-
-/* optional instructions automatically implemented */
-#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, 0, rs */
-#define TCG_TARGET_HAS_ext8u_i32 0 /* and rd, rs, 0xff */
-#define TCG_TARGET_HAS_ext16u_i32 0 /* and rd, rs, 0xffff */
-
-#define TCG_AREG0 TCG_REG_R17
-
-
-static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
-{
- start &= ~31;
- while (start <= stop) {
- asm volatile ("fdc 0(%0)\n\t"
- "sync\n\t"
- "fic 0(%%sr4, %0)\n\t"
- "sync"
- : : "r"(start) : "memory");
- start += 32;
- }
-}
-
-#endif
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index c1f07415ab..b865b4b662 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-ldst.h"
+
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
@@ -1455,15 +1457,8 @@ static void add_qemu_ldst_label(TCGContext *s,
uint8_t *raddr,
uint8_t **label_ptr)
{
- int idx;
- TCGLabelQemuLdst *label;
-
- if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
- tcg_abort();
- }
+ TCGLabelQemuLdst *label = new_ldst_label(s);
- idx = s->nb_qemu_ldst_labels++;
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -1628,25 +1623,6 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_push(s, retaddr);
tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
}
-
-/*
- * Generate TB finalization at the end of block
- */
-void tcg_out_tb_finalize(TCGContext *s)
-{
- int i;
- TCGLabelQemuLdst *label;
-
- /* qemu_ld/st slow paths */
- for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
- if (label->is_ld) {
- tcg_out_qemu_ld_slow_path(s, label);
- } else {
- tcg_out_qemu_st_slow_path(s, label);
- }
- }
-}
#endif /* CONFIG_SOFTMMU */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index d32d7ef6f0..fa7d966461 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -130,6 +130,8 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i64 0
#endif
+#define TCG_TARGET_HAS_new_ldst 0
+
#define TCG_TARGET_deposit_i32_valid(ofs, len) \
(((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \
((ofs) == 0 && (len) == 16))
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index cd4f1ae1db..0656d3907a 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -23,6 +23,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-null.h"
+
/*
* Register definitions
*/
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 4330c9cdd3..c90038aae5 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -151,6 +151,8 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_mulsh_i64 0
+#define TCG_TARGET_HAS_new_ldst 0
+
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
index 5f0a65b4ea..40551cdcb5 100644
--- a/tcg/mips/tcg-target.c
+++ b/tcg/mips/tcg-target.c
@@ -24,6 +24,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-null.h"
+
#if defined(TCG_TARGET_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
# define TCG_NEED_BSWAP 0
#else
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index c37252269f..683c6af8b9 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -122,6 +122,8 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_new_ldst 0
+
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */
#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index 97e33edcfd..68778c2bc4 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-ldst.h"
+
static uint8_t *tb_ret_addr;
#if defined _CALL_DARWIN || defined __APPLE__
@@ -532,15 +534,8 @@ static void add_qemu_ldst_label (TCGContext *s,
uint8_t *raddr,
uint8_t *label_ptr)
{
- int idx;
- TCGLabelQemuLdst *label;
-
- if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
- tcg_abort();
- }
+ TCGLabelQemuLdst *label = new_ldst_label(s);
- idx = s->nb_qemu_ldst_labels++;
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -889,23 +884,6 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_b(s, LK, (uintptr_t)st_trampolines[l->opc]);
tcg_out_b(s, 0, (uintptr_t)l->raddr);
}
-
-void tcg_out_tb_finalize(TCGContext *s)
-{
- int i;
- TCGLabelQemuLdst *label;
-
- /* qemu_ld/st slow paths */
- for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
- label = (TCGLabelQemuLdst *) &s->qemu_ldst_labels[i];
- if (label->is_ld) {
- tcg_out_qemu_ld_slow_path (s, label);
- }
- else {
- tcg_out_qemu_st_slow_path (s, label);
- }
- }
-}
#endif
#ifdef CONFIG_SOFTMMU
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index c9f8ff5206..e3ac6296e0 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -99,6 +99,8 @@ typedef enum {
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_new_ldst 0
+
#define TCG_AREG0 TCG_REG_R27
#define tcg_qemu_tb_exec(env, tb_ptr) \
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 332f4d8df1..12c1f61b03 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-ldst.h"
+
#define TCG_CT_CONST_S16 0x100
#define TCG_CT_CONST_U16 0x200
#define TCG_CT_CONST_S32 0x400
@@ -931,15 +933,8 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, int opc,
int data_reg, int addr_reg, int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
{
- int idx;
- TCGLabelQemuLdst *label;
-
- if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
- tcg_abort();
- }
+ TCGLabelQemuLdst *label = new_ldst_label(s);
- idx = s->nb_qemu_ldst_labels++;
- label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -998,21 +993,6 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_b(s, 0, (uintptr_t)lb->raddr);
}
-
-void tcg_out_tb_finalize(TCGContext *s)
-{
- int i, n = s->nb_qemu_ldst_labels;
-
- /* qemu_ld/st slow paths */
- for (i = 0; i < n; i++) {
- TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
- if (label->is_ld) {
- tcg_out_qemu_ld_slow_path(s, label);
- } else {
- tcg_out_qemu_st_slow_path(s, label);
- }
- }
-}
#endif /* SOFTMMU */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h
index fa4b9da093..457ea69de6 100644
--- a/tcg/ppc64/tcg-target.h
+++ b/tcg/ppc64/tcg-target.h
@@ -123,6 +123,8 @@ typedef enum {
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
+#define TCG_TARGET_HAS_new_ldst 0
+
#define TCG_AREG0 TCG_REG_R27
#define TCG_TARGET_EXTEND_ARGS 1
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 1b44aeee96..0a4f3be0e9 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -24,6 +24,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-null.h"
+
/* We only support generating code for 64-bit mode. */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index 6142fb26a2..10adb778c7 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -99,6 +99,8 @@ typedef enum TCGReg {
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
+#define TCG_TARGET_HAS_new_ldst 0
+
extern bool tcg_target_deposit_valid(int ofs, int len);
#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
#define TCG_TARGET_deposit_i64_valid tcg_target_deposit_valid
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index 9574954ac4..cbd1c91779 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-null.h"
+
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%g0",
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index 1ff2922bbe..00f3a1848b 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -148,6 +148,8 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i64 0
#endif
+#define TCG_TARGET_HAS_new_ldst 0
+
#define TCG_AREG0 TCG_REG_I0
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h
new file mode 100644
index 0000000000..2826d296d7
--- /dev/null
+++ b/tcg/tcg-be-ldst.h
@@ -0,0 +1,90 @@
+/*
+ * TCG Backend Data: load-store optimization only.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifdef CONFIG_SOFTMMU
+#define TCG_MAX_QEMU_LDST 640
+
+typedef struct TCGLabelQemuLdst {
+ int is_ld:1; /* qemu_ld: 1, qemu_st: 0 */
+ int opc:4;
+ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
+ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
+ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
+ TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
+ int mem_index; /* soft MMU memory index */
+    uint8_t *raddr;          /* gen code addr right after the qemu_ld/st IR op */
+ uint8_t *label_ptr[2]; /* label pointers to be updated */
+} TCGLabelQemuLdst;
+
+typedef struct TCGBackendData {
+ int nb_ldst_labels;
+ TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST];
+} TCGBackendData;
+
+
+/*
+ * Initialize TB backend data at the beginning of the TB.
+ */
+
+static inline void tcg_out_tb_init(TCGContext *s)
+{
+ s->be->nb_ldst_labels = 0;
+}
+
+/*
+ * Generate TB finalization at the end of block
+ */
+
+static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+
+static void tcg_out_tb_finalize(TCGContext *s)
+{
+ TCGLabelQemuLdst *lb = s->be->ldst_labels;
+ int i, n = s->be->nb_ldst_labels;
+
+ /* qemu_ld/st slow paths */
+ for (i = 0; i < n; i++) {
+ if (lb[i].is_ld) {
+ tcg_out_qemu_ld_slow_path(s, lb + i);
+ } else {
+ tcg_out_qemu_st_slow_path(s, lb + i);
+ }
+ }
+}
+
+/*
+ * Allocate a new TCGLabelQemuLdst entry.
+ */
+
+static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
+{
+ TCGBackendData *be = s->be;
+ int n = be->nb_ldst_labels;
+
+ assert(n < TCG_MAX_QEMU_LDST);
+ be->nb_ldst_labels = n + 1;
+ return &be->ldst_labels[n];
+}
+#else
+#include "tcg-be-null.h"
+#endif /* CONFIG_SOFTMMU */
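
The i386, ppc and ppc64 hunks above now funnel through new_ldst_label(). A minimal sketch of the pattern a backend follows when recording a slow path; the wrapper name record_slow_path and its exact parameter list are mine, for illustration:

static void record_slow_path(TCGContext *s, bool is_ld, int opc,
                             TCGReg datalo, TCGReg datahi,
                             TCGReg addrlo, TCGReg addrhi,
                             int mem_index, uint8_t *raddr,
                             uint8_t *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);  /* asserts on overflow */

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

tcg_out_tb_finalize() then walks the recorded entries at the end of the block, exactly as the per-backend copies deleted above used to do.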
diff --git a/tcg/tcg-be-null.h b/tcg/tcg-be-null.h
new file mode 100644
index 0000000000..74c57d5a6c
--- /dev/null
+++ b/tcg/tcg-be-null.h
@@ -0,0 +1,43 @@
+/*
+ * TCG Backend Data: No backend data
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+typedef struct TCGBackendData {
+ /* Empty */
+ char dummy;
+} TCGBackendData;
+
+
+/*
+ * Initialize TB backend data at the beginning of the TB.
+ */
+
+static inline void tcg_out_tb_init(TCGContext *s)
+{
+}
+
+/*
+ * Generate TB finalization at the end of block
+ */
+
+static inline void tcg_out_tb_finalize(TCGContext *s)
+{
+}
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index bb30a7cf39..7eabf22f01 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -137,24 +137,6 @@ static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
*tcg_ctx.gen_opparam_ptr++ = offset;
}
-static inline void tcg_gen_qemu_ldst_op_i64_i32(TCGOpcode opc, TCGv_i64 val,
- TCGv_i32 addr, TCGArg mem_index)
-{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(addr);
- *tcg_ctx.gen_opparam_ptr++ = mem_index;
-}
-
-static inline void tcg_gen_qemu_ldst_op_i64_i64(TCGOpcode opc, TCGv_i64 val,
- TCGv_i64 addr, TCGArg mem_index)
-{
- *tcg_ctx.gen_opc_ptr++ = opc;
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
- *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(addr);
- *tcg_ctx.gen_opparam_ptr++ = mem_index;
-}
-
static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
TCGv_i32 arg3, TCGv_i32 arg4)
{
@@ -361,6 +343,21 @@ static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 arg1,
*tcg_ctx.gen_opparam_ptr++ = arg6;
}
+static inline void tcg_add_param_i32(TCGv_i32 val)
+{
+ *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(val);
+}
+
+static inline void tcg_add_param_i64(TCGv_i64 val)
+{
+#if TCG_TARGET_REG_BITS == 32
+ *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(TCGV_LOW(val));
+ *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(TCGV_HIGH(val));
+#else
+ *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
+#endif
+}
+
static inline void gen_set_label(int n)
{
tcg_gen_op1i(INDEX_op_set_label, n);
@@ -2600,11 +2597,12 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new() tcg_temp_local_new_i32()
#define tcg_temp_free tcg_temp_free_i32
-#define tcg_gen_qemu_ldst_op tcg_gen_op3i_i32
-#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i32
#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b)
+#define tcg_add_param_tl tcg_add_param_i32
+#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
+#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
#define TCGv TCGv_i64
#define tcg_temp_new() tcg_temp_new_i64()
@@ -2612,11 +2610,12 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new() tcg_temp_local_new_i64()
#define tcg_temp_free tcg_temp_free_i64
-#define tcg_gen_qemu_ldst_op tcg_gen_op3i_i64
-#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i64
#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b)
+#define tcg_add_param_tl tcg_add_param_i64
+#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
+#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
#endif
/* debug info: write the PC of the corresponding QEMU CPU instruction */
@@ -2648,197 +2647,67 @@ static inline void tcg_gen_goto_tb(unsigned idx)
tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
-#if TCG_TARGET_REG_BITS == 32
-static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_ld8u, ret, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_ld8u, TCGV_LOW(ret), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-#endif
-}
-
-static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_ld8s, ret, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_ld8s, TCGV_LOW(ret), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-#endif
-}
-static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_ld16u, ret, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_ld16u, TCGV_LOW(ret), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-#endif
-}
-
-static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_ld16s, ret, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_ld16s, TCGV_LOW(ret), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-#endif
-}
-
-static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_ld32, ret, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_ld32, TCGV_LOW(ret), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-#endif
-}
-
-static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_ld32, ret, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_ld32, TCGV_LOW(ret), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
- tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-#endif
-}
-
-static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op4i_i32(INDEX_op_qemu_ld64, TCGV_LOW(ret), TCGV_HIGH(ret), addr, mem_index);
-#else
- tcg_gen_op5i_i32(INDEX_op_qemu_ld64, TCGV_LOW(ret), TCGV_HIGH(ret),
- TCGV_LOW(addr), TCGV_HIGH(addr), mem_index);
-#endif
-}
-
-static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_st8, arg, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_st8, TCGV_LOW(arg), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
-#endif
-}
-
-static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_st16, arg, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_st16, TCGV_LOW(arg), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
-#endif
-}
-
-static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op3i_i32(INDEX_op_qemu_st32, arg, addr, mem_index);
-#else
- tcg_gen_op4i_i32(INDEX_op_qemu_st32, TCGV_LOW(arg), TCGV_LOW(addr),
- TCGV_HIGH(addr), mem_index);
-#endif
-}
-
-static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
-{
-#if TARGET_LONG_BITS == 32
- tcg_gen_op4i_i32(INDEX_op_qemu_st64, TCGV_LOW(arg), TCGV_HIGH(arg), addr,
- mem_index);
-#else
- tcg_gen_op5i_i32(INDEX_op_qemu_st64, TCGV_LOW(arg), TCGV_HIGH(arg),
- TCGV_LOW(addr), TCGV_HIGH(addr), mem_index);
-#endif
-}
-
-#define tcg_gen_ld_ptr(R, A, O) tcg_gen_ld_i32(TCGV_PTR_TO_NAT(R), (A), (O))
-#define tcg_gen_discard_ptr(A) tcg_gen_discard_i32(TCGV_PTR_TO_NAT(A))
-
-#else /* TCG_TARGET_REG_BITS == 32 */
+void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
+void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
+void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
+void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld8u, ret, addr, mem_index);
+ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
}
static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld8s, ret, addr, mem_index);
+ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
}
static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld16u, ret, addr, mem_index);
+ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
}
static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld16s, ret, addr, mem_index);
+ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
}
static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
{
-#if TARGET_LONG_BITS == 32
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32, ret, addr, mem_index);
-#else
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32u, ret, addr, mem_index);
-#endif
+ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
}
static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
{
-#if TARGET_LONG_BITS == 32
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32, ret, addr, mem_index);
-#else
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32s, ret, addr, mem_index);
-#endif
+ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
}
static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op_i64(INDEX_op_qemu_ld64, ret, addr, mem_index);
+ tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEQ);
}
static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_st8, arg, addr, mem_index);
+ tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
}
static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_st16, arg, addr, mem_index);
+ tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
}
static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op(INDEX_op_qemu_st32, arg, addr, mem_index);
+ tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
}
static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ldst_op_i64(INDEX_op_qemu_st64, arg, addr, mem_index);
+ tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ);
}
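
With the rewrite above, every legacy entry point is now a one-line shim over the MemOp-based API. A standalone table of the resulting mapping, with the MO_* values replicated from the enum this series adds to tcg/tcg.h (the toy assumes a little-endian target, so MO_TE is 0):

#include <stdio.h>

enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
       MO_SIGN = 4, MO_BSWAP = 8, MO_TE = 0 };

#define MO_UB    MO_8
#define MO_SB   (MO_SIGN | MO_8)
#define MO_TEUW (MO_TE | MO_16)
#define MO_TESW (MO_TE | MO_SIGN | MO_16)
#define MO_TEUL (MO_TE | MO_32)
#define MO_TESL (MO_TE | MO_SIGN | MO_32)
#define MO_TEQ  (MO_TE | MO_64)

static const struct { const char *legacy; int memop; } map[] = {
    { "qemu_ld8u",  MO_UB },   { "qemu_ld8s",  MO_SB },
    { "qemu_ld16u", MO_TEUW }, { "qemu_ld16s", MO_TESW },
    { "qemu_ld32u", MO_TEUL }, { "qemu_ld32s", MO_TESL },
    { "qemu_ld64",  MO_TEQ },
    { "qemu_st8",   MO_UB },   { "qemu_st16",  MO_TEUW },
    { "qemu_st32",  MO_TEUL }, { "qemu_st64",  MO_TEQ },
};

int main(void)
{
    for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
        printf("%-11s -> memop 0x%x\n", map[i].legacy, map[i].memop);
    }
    return 0;
}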
-#define tcg_gen_ld_ptr(R, A, O) tcg_gen_ld_i64(TCGV_PTR_TO_NAT(R), (A), (O))
-#define tcg_gen_discard_ptr(A) tcg_gen_discard_i64(TCGV_PTR_TO_NAT(A))
-
-#endif /* TCG_TARGET_REG_BITS != 32 */
-
#if TARGET_LONG_BITS == 64
#define tcg_gen_movi_tl tcg_gen_movi_i64
#define tcg_gen_mov_tl tcg_gen_mov_i64
@@ -2997,17 +2866,25 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
#endif
#if TCG_TARGET_REG_BITS == 32
-#define tcg_gen_add_ptr(R, A, B) tcg_gen_add_i32(TCGV_PTR_TO_NAT(R), \
- TCGV_PTR_TO_NAT(A), \
- TCGV_PTR_TO_NAT(B))
-#define tcg_gen_addi_ptr(R, A, B) tcg_gen_addi_i32(TCGV_PTR_TO_NAT(R), \
- TCGV_PTR_TO_NAT(A), (B))
-#define tcg_gen_ext_i32_ptr(R, A) tcg_gen_mov_i32(TCGV_PTR_TO_NAT(R), (A))
-#else /* TCG_TARGET_REG_BITS == 32 */
-#define tcg_gen_add_ptr(R, A, B) tcg_gen_add_i64(TCGV_PTR_TO_NAT(R), \
- TCGV_PTR_TO_NAT(A), \
- TCGV_PTR_TO_NAT(B))
-#define tcg_gen_addi_ptr(R, A, B) tcg_gen_addi_i64(TCGV_PTR_TO_NAT(R), \
- TCGV_PTR_TO_NAT(A), (B))
-#define tcg_gen_ext_i32_ptr(R, A) tcg_gen_ext_i32_i64(TCGV_PTR_TO_NAT(R), (A))
-#endif /* TCG_TARGET_REG_BITS != 32 */
+# define tcg_gen_ld_ptr(R, A, O) \
+ tcg_gen_ld_i32(TCGV_PTR_TO_NAT(R), (A), (O))
+# define tcg_gen_discard_ptr(A) \
+ tcg_gen_discard_i32(TCGV_PTR_TO_NAT(A))
+# define tcg_gen_add_ptr(R, A, B) \
+ tcg_gen_add_i32(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B))
+# define tcg_gen_addi_ptr(R, A, B) \
+ tcg_gen_addi_i32(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B))
+# define tcg_gen_ext_i32_ptr(R, A) \
+ tcg_gen_mov_i32(TCGV_PTR_TO_NAT(R), (A))
+#else
+# define tcg_gen_ld_ptr(R, A, O) \
+ tcg_gen_ld_i64(TCGV_PTR_TO_NAT(R), (A), (O))
+# define tcg_gen_discard_ptr(A) \
+ tcg_gen_discard_i64(TCGV_PTR_TO_NAT(A))
+# define tcg_gen_add_ptr(R, A, B) \
+ tcg_gen_add_i64(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B))
+# define tcg_gen_addi_ptr(R, A, B) \
+ tcg_gen_addi_i64(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B))
+# define tcg_gen_ext_i32_ptr(R, A) \
+ tcg_gen_ext_i32_i64(TCGV_PTR_TO_NAT(R), (A))
+#endif /* TCG_TARGET_REG_BITS == 32 */
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index a75c29d518..d71707d9bb 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -180,79 +180,107 @@ DEF(debug_insn_start, 0, 0, 1, TCG_OPF_NOT_PRESENT)
#endif
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)
-/* Note: even if TARGET_LONG_BITS is not defined, the INDEX_op
- constants must be defined */
+
+#define IMPL_NEW_LDST \
+ (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
+ | IMPL(TCG_TARGET_HAS_new_ldst))
+
+#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
+DEF(qemu_ld_i32, 1, 1, 2, IMPL_NEW_LDST)
+DEF(qemu_st_i32, 0, 2, 2, IMPL_NEW_LDST)
+# if TCG_TARGET_REG_BITS == 64
+DEF(qemu_ld_i64, 1, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
+DEF(qemu_st_i64, 0, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
+# else
+DEF(qemu_ld_i64, 2, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
+DEF(qemu_st_i64, 0, 3, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
+# endif
+#else
+DEF(qemu_ld_i32, 1, 2, 2, IMPL_NEW_LDST)
+DEF(qemu_st_i32, 0, 3, 2, IMPL_NEW_LDST)
+DEF(qemu_ld_i64, 2, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
+DEF(qemu_st_i64, 0, 4, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
+#endif
+
+#undef IMPL_NEW_LDST
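
The operand counts in the DEF() lines above follow a simple rule: a data value needs one register word per host word it occupies, an address needs two words when the guest virtual address is wider than a host register, and the two constant arguments are always the memop and the mmu index. A standalone recomputation of the table (host/target widths are plain parameters here, not probed from a real build):

#include <stdio.h>

static void counts(int target_bits, int host_bits)
{
    int addr = (target_bits <= host_bits) ? 1 : 2;   /* address words */
    int d32 = 1;                                     /* i32 data words */
    int d64 = (host_bits == 64) ? 1 : 2;             /* i64 data words */

    printf("target=%d host=%d:\n", target_bits, host_bits);
    printf("  qemu_ld_i32: %d out, %d in, 2 const\n", d32, addr);
    printf("  qemu_st_i32: 0 out, %d in, 2 const\n", d32 + addr);
    printf("  qemu_ld_i64: %d out, %d in, 2 const\n", d64, addr);
    printf("  qemu_st_i64: 0 out, %d in, 2 const\n", d64 + addr);
}

int main(void)
{
    counts(32, 32);
    counts(64, 32);   /* 64-bit guest on a 32-bit host */
    counts(64, 64);
    return 0;
}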
+
+#define IMPL_OLD_LDST \
+ (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
+ | IMPL(!TCG_TARGET_HAS_new_ldst))
+
#if TCG_TARGET_REG_BITS == 32
#if TARGET_LONG_BITS == 32
-DEF(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_ld8u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld8u, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_ld8s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld8s, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_ld16u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld16u, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_ld16s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld16s, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_ld32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_ld32, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld32, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_ld64, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld64, 2, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#else
-DEF(qemu_ld64, 2, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld64, 2, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_st8, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st8, 0, 3, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_st16, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st16, 0, 3, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST)
#else
-DEF(qemu_st32, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st32, 0, 3, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
-DEF(qemu_st64, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st64, 0, 3, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#else
-DEF(qemu_st64, 0, 4, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st64, 0, 4, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#endif
#else /* TCG_TARGET_REG_BITS == 32 */
-DEF(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld32u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld32s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld64, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_ld32u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_ld32s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_ld64, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st64, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
+DEF(qemu_st64, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#endif /* TCG_TARGET_REG_BITS != 32 */
+#undef IMPL_OLD_LDST
+
#undef IMPL
#undef IMPL64
#undef DEF
diff --git a/tcg/tcg.c b/tcg/tcg.c
index fd7fb6b85e..66d3f3de80 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -103,6 +103,9 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
intptr_t arg2);
static int tcg_target_const_match(tcg_target_long val,
const TCGArgConstraint *arg_ct);
+static void tcg_out_tb_init(TCGContext *s);
+static void tcg_out_tb_finalize(TCGContext *s);
+
TCGOpDef tcg_op_defs[] = {
#define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
@@ -254,12 +257,41 @@ void tcg_pool_reset(TCGContext *s)
s->pool_current = NULL;
}
+#include "helper.h"
+
+typedef struct TCGHelperInfo {
+ void *func;
+ const char *name;
+} TCGHelperInfo;
+
+static const TCGHelperInfo all_helpers[] = {
+#define GEN_HELPER 2
+#include "helper.h"
+
+ /* Include tcg-runtime.c functions. */
+ { tcg_helper_div_i32, "div_i32" },
+ { tcg_helper_rem_i32, "rem_i32" },
+ { tcg_helper_divu_i32, "divu_i32" },
+ { tcg_helper_remu_i32, "remu_i32" },
+
+ { tcg_helper_shl_i64, "shl_i64" },
+ { tcg_helper_shr_i64, "shr_i64" },
+ { tcg_helper_sar_i64, "sar_i64" },
+ { tcg_helper_div_i64, "div_i64" },
+ { tcg_helper_rem_i64, "rem_i64" },
+ { tcg_helper_divu_i64, "divu_i64" },
+ { tcg_helper_remu_i64, "remu_i64" },
+ { tcg_helper_mulsh_i64, "mulsh_i64" },
+ { tcg_helper_muluh_i64, "muluh_i64" },
+};
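
The first '#include "helper.h"' above pulls in prototypes; the second include under '#define GEN_HELPER 2' re-expands the same DEF_HELPER_* lines into { func, name } initializers. A self-contained sketch of that double-expansion trick, modeled with one X-macro list instead of a real header:

#include <stdio.h>

#define HELPER_LIST(X) X(div_i32) X(rem_i32) X(divu_i32)

/* Pass 1: definitions (toy bodies). */
#define AS_DEF(name) static int helper_##name(int a, int b) { return a + b; }
HELPER_LIST(AS_DEF)

typedef struct { void *func; const char *name; } HelperInfo;

/* Pass 2: table entries, mirroring "#define GEN_HELPER 2". */
#define AS_ENTRY(name) { (void *)helper_##name, #name },
static const HelperInfo all_helpers[] = { HELPER_LIST(AS_ENTRY) };

int main(void)
{
    for (unsigned i = 0; i < sizeof(all_helpers) / sizeof(all_helpers[0]); i++) {
        printf("%s at %p\n", all_helpers[i].name, all_helpers[i].func);
    }
    return 0;
}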
+
void tcg_context_init(TCGContext *s)
{
- int op, total_args, n;
+ int op, total_args, n, i;
TCGOpDef *def;
TCGArgConstraint *args_ct;
int *sorted_args;
+ GHashTable *helper_table;
memset(s, 0, sizeof(*s));
s->nb_globals = 0;
@@ -284,7 +316,16 @@ void tcg_context_init(TCGContext *s)
sorted_args += n;
args_ct += n;
}
-
+
+ /* Register helpers. */
+ /* Use g_direct_hash/equal for direct pointer comparisons on func. */
+ s->helpers = helper_table = g_hash_table_new(NULL, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
+ g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
+ (gpointer)all_helpers[i].name);
+ }
+
tcg_target_init(s);
}
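
Passing NULL for both callbacks makes glib fall back to g_direct_hash/g_direct_equal, so the table keys on the raw function pointer. A minimal standalone demo of the same registration-and-lookup round trip (build against glib-2.0; this also mirrors the simplified tcg_find_helper further down):

#include <glib.h>
#include <stdio.h>

static int helper_demo(int x) { return x + 1; }

int main(void)
{
    /* NULL hash/equal funcs -> g_direct_hash/g_direct_equal,
       i.e. raw pointer identity, which is what a code address wants. */
    GHashTable *helpers = g_hash_table_new(NULL, NULL);

    g_hash_table_insert(helpers, (gpointer)helper_demo, (gpointer)"demo");

    const char *name = g_hash_table_lookup(helpers, (gpointer)helper_demo);
    printf("found: %s\n", name ? name : "(unknown)");

    g_hash_table_destroy(helpers);
    return 0;
}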
@@ -332,13 +373,7 @@ void tcg_func_start(TCGContext *s)
s->gen_opc_ptr = s->gen_opc_buf;
s->gen_opparam_ptr = s->gen_opparam_buf;
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
- /* Initialize qemu_ld/st labels to assist code generation at the end of TB
- for TLB miss cases at the end of TB */
- s->qemu_ldst_labels = tcg_malloc(sizeof(TCGLabelQemuLdst) *
- TCG_MAX_QEMU_LDST);
- s->nb_qemu_ldst_labels = 0;
-#endif
+ s->be = tcg_malloc(sizeof(TCGBackendData));
}
static inline void tcg_temp_alloc(TCGContext *s, int n)
@@ -620,25 +655,6 @@ int tcg_check_temp_count(void)
}
#endif
-void tcg_register_helper(void *func, const char *name)
-{
- TCGContext *s = &tcg_ctx;
- int n;
- if ((s->nb_helpers + 1) > s->allocated_helpers) {
- n = s->allocated_helpers;
- if (n == 0) {
- n = 4;
- } else {
- n *= 2;
- }
- s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
- s->allocated_helpers = n;
- }
- s->helpers[s->nb_helpers].func = (uintptr_t)func;
- s->helpers[s->nb_helpers].name = name;
- s->nb_helpers++;
-}
-
/* Note: we convert the 64 bit args to 32 bit and do some alignment
and endian swap. Maybe it would be better to do the alignment
and endian swap in tcg_reg_alloc_call(). */
@@ -795,6 +811,188 @@ void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
}
#endif
+static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
+{
+ switch (op & MO_SIZE) {
+ case MO_8:
+ op &= ~MO_BSWAP;
+ break;
+ case MO_16:
+ break;
+ case MO_32:
+ if (!is64) {
+ op &= ~MO_SIGN;
+ }
+ break;
+ case MO_64:
+ if (!is64) {
+ tcg_abort();
+ }
+ break;
+ }
+ if (st) {
+ op &= ~MO_SIGN;
+ }
+ return op;
+}
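
A standalone replica of the canonicalization rules above, with the MO_* bits copied from the TCGMemOp enum this series adds to tcg/tcg.h (abort() stands in for tcg_abort()):

#include <assert.h>
#include <stdlib.h>

enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
       MO_SIZE = 3, MO_SIGN = 4, MO_BSWAP = 8 };

static int canonicalize(int op, int is64, int st)
{
    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;        /* byte swaps are meaningless on bytes */
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;     /* a 32-bit destination needs no extension */
        }
        break;
    case MO_64:
        if (!is64) {
            abort();            /* 64-bit access into a 32-bit temp */
        }
        break;
    }
    if (st) {
        op &= ~MO_SIGN;         /* stores never sign-extend */
    }
    return op;
}

int main(void)
{
    assert(canonicalize(MO_8 | MO_BSWAP, 0, 0) == MO_8);
    assert(canonicalize(MO_32 | MO_SIGN, 0, 0) == MO_32);            /* i32 load */
    assert(canonicalize(MO_32 | MO_SIGN, 1, 0) == (MO_32 | MO_SIGN));
    assert(canonicalize(MO_16 | MO_SIGN, 1, 1) == MO_16);            /* store */
    return 0;
}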
+
+static const TCGOpcode old_ld_opc[8] = {
+ [MO_UB] = INDEX_op_qemu_ld8u,
+ [MO_SB] = INDEX_op_qemu_ld8s,
+ [MO_UW] = INDEX_op_qemu_ld16u,
+ [MO_SW] = INDEX_op_qemu_ld16s,
+#if TCG_TARGET_REG_BITS == 32
+ [MO_UL] = INDEX_op_qemu_ld32,
+ [MO_SL] = INDEX_op_qemu_ld32,
+#else
+ [MO_UL] = INDEX_op_qemu_ld32u,
+ [MO_SL] = INDEX_op_qemu_ld32s,
+#endif
+ [MO_Q] = INDEX_op_qemu_ld64,
+};
+
+static const TCGOpcode old_st_opc[4] = {
+ [MO_UB] = INDEX_op_qemu_st8,
+ [MO_UW] = INDEX_op_qemu_st16,
+ [MO_UL] = INDEX_op_qemu_st32,
+ [MO_Q] = INDEX_op_qemu_st64,
+};
+
+void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 0, 0);
+
+ if (TCG_TARGET_HAS_new_ldst) {
+ *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
+ tcg_add_param_i32(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = memop;
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+ return;
+ }
+
+ /* The old opcodes only support target-endian memory operations. */
+ assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
+ assert(old_ld_opc[memop & MO_SSIZE] != 0);
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
+ tcg_add_param_i32(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+ } else {
+ TCGv_i64 val64 = tcg_temp_new_i64();
+
+ *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
+ tcg_add_param_i64(val64);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+
+ tcg_gen_trunc_i64_i32(val, val64);
+ tcg_temp_free_i64(val64);
+ }
+}
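
Either path ends with the same shape of emission: one opcode word followed by parameter words, with the new format carrying the memop as an extra constant argument ahead of the mmu index. A purely illustrative rendering of the two layouts (the numbers are made-up value/address indices):

#include <stdio.h>

static void emit_new(int val, int addr, int memop, int idx)
{
    printf("qemu_ld_i32  params: val=%d addr=%d memop=0x%x idx=%d\n",
           val, addr, memop, idx);
}

static void emit_old(const char *opc, int val, int addr, int idx)
{
    printf("%-12s params: val=%d addr=%d idx=%d\n", opc, val, addr, idx);
}

int main(void)
{
    emit_new(0, 1, 0x1 /* MO_TEUW on a little-endian target */, 1);
    emit_old("qemu_ld16u", 0, 1, 1);   /* size baked into the opcode */
    return 0;
}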
+
+void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 0, 1);
+
+ if (TCG_TARGET_HAS_new_ldst) {
+ *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
+ tcg_add_param_i32(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = memop;
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+ return;
+ }
+
+ /* The old opcodes only support target-endian memory operations. */
+ assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
+ assert(old_st_opc[memop & MO_SIZE] != 0);
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
+ tcg_add_param_i32(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+ } else {
+ TCGv_i64 val64 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(val64, val);
+
+ *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
+ tcg_add_param_i64(val64);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+
+ tcg_temp_free_i64(val64);
+ }
+}
+
+void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 1, 0);
+
+#if TCG_TARGET_REG_BITS == 32
+ if ((memop & MO_SIZE) < MO_64) {
+ tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
+ if (memop & MO_SIGN) {
+ tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
+ } else {
+ tcg_gen_movi_i32(TCGV_HIGH(val), 0);
+ }
+ return;
+ }
+#endif
+
+ if (TCG_TARGET_HAS_new_ldst) {
+ *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
+ tcg_add_param_i64(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = memop;
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+ return;
+ }
+
+ /* The old opcodes only support target-endian memory operations. */
+ assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
+ assert(old_ld_opc[memop & MO_SSIZE] != 0);
+
+ *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
+ tcg_add_param_i64(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+}
+
+void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+ memop = tcg_canonicalize_memop(memop, 1, 1);
+
+#if TCG_TARGET_REG_BITS == 32
+ if ((memop & MO_SIZE) < MO_64) {
+ tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
+ return;
+ }
+#endif
+
+ if (TCG_TARGET_HAS_new_ldst) {
+ *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
+ tcg_add_param_i64(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = memop;
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+ return;
+ }
+
+ /* The old opcodes only support target-endian memory operations. */
+ assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
+ assert(old_st_opc[memop & MO_SIZE] != 0);
+
+ *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
+ tcg_add_param_i64(val);
+ tcg_add_param_tl(addr);
+ *tcg_ctx.gen_opparam_ptr++ = idx;
+}
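
On a 32-bit host, both i64 fast paths above degrade sub-64-bit accesses to the i32 routines; for loads the high half is then rebuilt by sign- or zero-extension. A standalone model of just that extension step (it assumes the usual arithmetic right shift for signed values, which is what tcg_gen_sari_i32 guarantees at the IR level):

#include <assert.h>
#include <stdint.h>

static uint32_t high_word(uint32_t lo, int sign)
{
    /* matches tcg_gen_sari_i32(hi, lo, 31) vs tcg_gen_movi_i32(hi, 0) */
    return sign ? (uint32_t)((int32_t)lo >> 31) : 0;
}

int main(void)
{
    assert(high_word(0x80000000u, 1) == 0xffffffffu);  /* sign-extend */
    assert(high_word(0x80000000u, 0) == 0);            /* zero-extend */
    return 0;
}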
static void tcg_reg_alloc_start(TCGContext *s)
{
@@ -851,47 +1049,14 @@ char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
}
-static int helper_cmp(const void *p1, const void *p2)
+/* Find helper name. */
+static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
- const TCGHelperInfo *th1 = p1;
- const TCGHelperInfo *th2 = p2;
- if (th1->func < th2->func)
- return -1;
- else if (th1->func == th2->func)
- return 0;
- else
- return 1;
-}
-
-/* find helper definition (Note: A hash table would be better) */
-static TCGHelperInfo *tcg_find_helper(TCGContext *s, uintptr_t val)
-{
- int m, m_min, m_max;
- TCGHelperInfo *th;
- uintptr_t v;
-
- if (unlikely(!s->helpers_sorted)) {
- qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
- helper_cmp);
- s->helpers_sorted = 1;
+ const char *ret = NULL;
+ if (s->helpers) {
+ ret = g_hash_table_lookup(s->helpers, (gpointer)val);
}
-
- /* binary search */
- m_min = 0;
- m_max = s->nb_helpers - 1;
- while (m_min <= m_max) {
- m = (m_min + m_max) >> 1;
- th = &s->helpers[m];
- v = th->func;
- if (v == val)
- return th;
- else if (val < v) {
- m_max = m - 1;
- } else {
- m_min = m + 1;
- }
- }
- return NULL;
+ return ret;
}
static const char * const cond_name[] =
@@ -910,6 +1075,22 @@ static const char * const cond_name[] =
[TCG_COND_GTU] = "gtu"
};
+static const char * const ldst_name[] =
+{
+ [MO_UB] = "ub",
+ [MO_SB] = "sb",
+ [MO_LEUW] = "leuw",
+ [MO_LESW] = "lesw",
+ [MO_LEUL] = "leul",
+ [MO_LESL] = "lesl",
+ [MO_LEQ] = "leq",
+ [MO_BEUW] = "beuw",
+ [MO_BESW] = "besw",
+ [MO_BEUL] = "beul",
+ [MO_BESL] = "besl",
+ [MO_BEQ] = "beq",
+};
+
void tcg_dump_ops(TCGContext *s)
{
const uint16_t *opc_ptr;
@@ -976,7 +1157,7 @@ void tcg_dump_ops(TCGContext *s)
}
} else if (c == INDEX_op_movi_i32 || c == INDEX_op_movi_i64) {
tcg_target_ulong val;
- TCGHelperInfo *th;
+ const char *name;
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
@@ -984,9 +1165,9 @@ void tcg_dump_ops(TCGContext *s)
qemu_log(" %s %s,$", def->name,
tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
val = args[1];
- th = tcg_find_helper(s, val);
- if (th) {
- qemu_log("%s", th->name);
+ name = tcg_find_helper(s, val);
+ if (name) {
+ qemu_log("%s", name);
} else {
if (c == INDEX_op_movi_i32) {
qemu_log("0x%x", (uint32_t)val);
@@ -1038,6 +1219,17 @@ void tcg_dump_ops(TCGContext *s)
}
i = 1;
break;
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_st_i64:
+ if (args[k] < ARRAY_SIZE(ldst_name) && ldst_name[args[k]]) {
+ qemu_log(",%s", ldst_name[args[k++]]);
+ } else {
+ qemu_log(",$0x%" TCG_PRIlx, args[k++]);
+ }
+ i = 1;
+ break;
default:
i = 0;
break;
@@ -2311,6 +2503,8 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
s->code_buf = gen_code_buf;
s->code_ptr = gen_code_buf;
+ tcg_out_tb_init(s);
+
args = s->gen_opparam_buf;
op_index = 0;
@@ -2384,10 +2578,8 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
#endif
}
the_end:
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* Generate TB finalization at the end of block */
tcg_out_tb_finalize(s);
-#endif
return -1;
}
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 902c751d26..0d9bd293b5 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -197,6 +197,60 @@ typedef enum TCGType {
#endif
} TCGType;
+/* Constants for qemu_ld and qemu_st for the Memory Operation field. */
+typedef enum TCGMemOp {
+ MO_8 = 0,
+ MO_16 = 1,
+ MO_32 = 2,
+ MO_64 = 3,
+ MO_SIZE = 3, /* Mask for the above. */
+
+ MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
+
+ MO_BSWAP = 8, /* Host reverse endian. */
+#ifdef HOST_WORDS_BIGENDIAN
+ MO_LE = MO_BSWAP,
+ MO_BE = 0,
+#else
+ MO_LE = 0,
+ MO_BE = MO_BSWAP,
+#endif
+#ifdef TARGET_WORDS_BIGENDIAN
+ MO_TE = MO_BE,
+#else
+ MO_TE = MO_LE,
+#endif
+
+ /* Combinations of the above, for ease of use. */
+ MO_UB = MO_8,
+ MO_UW = MO_16,
+ MO_UL = MO_32,
+ MO_SB = MO_SIGN | MO_8,
+ MO_SW = MO_SIGN | MO_16,
+ MO_SL = MO_SIGN | MO_32,
+ MO_Q = MO_64,
+
+ MO_LEUW = MO_LE | MO_UW,
+ MO_LEUL = MO_LE | MO_UL,
+ MO_LESW = MO_LE | MO_SW,
+ MO_LESL = MO_LE | MO_SL,
+ MO_LEQ = MO_LE | MO_Q,
+
+ MO_BEUW = MO_BE | MO_UW,
+ MO_BEUL = MO_BE | MO_UL,
+ MO_BESW = MO_BE | MO_SW,
+ MO_BESL = MO_BE | MO_SL,
+ MO_BEQ = MO_BE | MO_Q,
+
+ MO_TEUW = MO_TE | MO_UW,
+ MO_TEUL = MO_TE | MO_UL,
+ MO_TESW = MO_TE | MO_SW,
+ MO_TESL = MO_TE | MO_SL,
+ MO_TEQ = MO_TE | MO_Q,
+
+ MO_SSIZE = MO_SIZE | MO_SIGN,
+} TCGMemOp;
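
The encoding above packs everything into one small integer: bits 0-1 hold log2 of the access size in bytes, bit 2 the signedness, bit 3 whether the host must byte-swap. A quick standalone decoder (values replicated from the enum; the sample constants assume a little-endian host and target, so MO_TE == MO_LE == 0):

#include <stdio.h>

enum { MO_SIZE = 3, MO_SIGN = 4, MO_BSWAP = 8 };

static void decode(const char *name, int op)
{
    printf("%-8s size=%d bytes, %s, %s\n", name,
           1 << (op & MO_SIZE),
           (op & MO_SIGN) ? "signed" : "unsigned",
           (op & MO_BSWAP) ? "byte-swapped" : "host order");
}

int main(void)
{
    decode("MO_TESW", 4 | 1);      /* MO_SIGN | MO_16 on an LE target */
    decode("MO_BEUL", 8 | 2);      /* MO_BSWAP | MO_32 on an LE host */
    decode("MO_UB",   0);
    return 0;
}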
+
typedef tcg_target_ulong TCGArg;
/* Define a type and accessor macros for variables. Using a struct is
@@ -211,24 +265,6 @@ typedef tcg_target_ulong TCGArg;
are aliases for target_ulong and host pointer sized values respectively.
*/
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* Macros/structures for qemu_ld/st IR code optimization:
- TCG_MAX_HELPER_LABELS is defined as same as OPC_BUF_SIZE in exec-all.h. */
-#define TCG_MAX_QEMU_LDST 640
-
-typedef struct TCGLabelQemuLdst {
- int is_ld:1; /* qemu_ld: 1, qemu_st: 0 */
- int opc:4;
- int addrlo_reg; /* reg index for low word of guest virtual addr */
- int addrhi_reg; /* reg index for high word of guest virtual addr */
- int datalo_reg; /* reg index for low word to be loaded or stored */
- int datahi_reg; /* reg index for high word to be loaded or stored */
- int mem_index; /* soft MMU memory index */
- uint8_t *raddr; /* gen code addr of the next IR of qemu_ld/st IR */
- uint8_t *label_ptr[2]; /* label pointers to be updated */
-} TCGLabelQemuLdst;
-#endif
-
#ifdef CONFIG_DEBUG_TCG
#define DEBUG_TCGV 1
#endif
@@ -405,11 +441,6 @@ typedef struct TCGTemp {
const char *name;
} TCGTemp;
-typedef struct TCGHelperInfo {
- uintptr_t func;
- const char *name;
-} TCGHelperInfo;
-
typedef struct TCGContext TCGContext;
struct TCGContext {
@@ -447,10 +478,7 @@ struct TCGContext {
uint8_t *code_ptr;
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
- TCGHelperInfo *helpers;
- int nb_helpers;
- int allocated_helpers;
- int helpers_sorted;
+ GHashTable *helpers;
#ifdef CONFIG_PROFILER
/* profiling info */
@@ -496,12 +524,8 @@ struct TCGContext {
TBContext tb_ctx;
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
- /* labels info for qemu_ld/st IRs
- The labels help to generate TLB miss case codes at the end of TB */
- TCGLabelQemuLdst *qemu_ldst_labels;
- int nb_qemu_ldst_labels;
-#endif
+ /* The TCGBackendData structure is private to tcg-target.c. */
+ struct TCGBackendData *be;
};
extern TCGContext tcg_ctx;
@@ -680,8 +704,6 @@ TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args,
TCGOpDef *tcg_op_def);
/* only used for debugging purposes */
-void tcg_register_helper(void *func, const char *name);
-const char *tcg_helper_get_name(TCGContext *s, void *func);
void tcg_dump_ops(TCGContext *s);
void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);
@@ -745,11 +767,6 @@ TCGv_i64 tcg_const_local_i64(int64_t val);
void tcg_register_jit(void *buf, size_t buf_size);
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* Generate TB finalization at the end of block */
-void tcg_out_tb_finalize(TCGContext *s);
-#endif
-
/*
* Memory helpers that will be used by TCG generated code.
*/
@@ -757,29 +774,66 @@ void tcg_out_tb_finalize(TCGContext *s);
/* Value zero-extended to tcg register size. */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_ldul_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
/* Value sign-extended to tcg register size. */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_ldsl_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ int mmu_idx, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
int mmu_idx, uintptr_t retaddr);
-void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr);
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx, uintptr_t retaddr);
+
+/* Temporary aliases until backends are converted. */
+#ifdef TARGET_WORDS_BIGENDIAN
+# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
+# define helper_ret_lduw_mmu helper_be_lduw_mmu
+# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
+# define helper_ret_ldul_mmu helper_be_ldul_mmu
+# define helper_ret_ldq_mmu helper_be_ldq_mmu
+# define helper_ret_stw_mmu helper_be_stw_mmu
+# define helper_ret_stl_mmu helper_be_stl_mmu
+# define helper_ret_stq_mmu helper_be_stq_mmu
+#else
+# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
+# define helper_ret_lduw_mmu helper_le_lduw_mmu
+# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
+# define helper_ret_ldul_mmu helper_le_ldul_mmu
+# define helper_ret_ldq_mmu helper_le_ldq_mmu
+# define helper_ret_stw_mmu helper_le_stw_mmu
+# define helper_ret_stl_mmu helper_le_stl_mmu
+# define helper_ret_stq_mmu helper_le_stq_mmu
+#endif
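
The alias block above lets backends that still use the old helper_ret_* names keep working: the preprocessor resolves each name to the le_ or be_ flavor matching the target's endianness. The same selection pattern, reduced to a standalone toy with a flag in place of TARGET_WORDS_BIGENDIAN:

#include <stdio.h>

#define TOY_TARGET_BIG_ENDIAN 0

static void le_lduw(void) { puts("helper_le_lduw_mmu"); }
static void be_lduw(void) { puts("helper_be_lduw_mmu"); }

#if TOY_TARGET_BIG_ENDIAN
# define ret_lduw be_lduw
#else
# define ret_lduw le_lduw
#endif

int main(void)
{
    ret_lduw();   /* resolves at compile time, like the aliases above */
    return 0;
}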
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index 4976becbe7..fc80704de8 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
+#include "tcg-be-null.h"
+
/* TODO list:
* - See TODO comments in code.
*/
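
tcg-be-null.h itself is not shown in this hunk; here is a hedged standalone sketch of the shape such a header plausibly needs, inferred only from the hooks tcg.c now uses unconditionally (the TCGBackendData allocation in tcg_func_start, and tcg_out_tb_init/tcg_out_tb_finalize around code generation), not quoted from the real file:

#include <stdio.h>

typedef struct TCGContext TCGContext;              /* opaque for the sketch */
typedef struct TCGBackendData { char dummy; } TCGBackendData;

static void tcg_out_tb_init(TCGContext *s)     { (void)s; /* nothing to set up */ }
static void tcg_out_tb_finalize(TCGContext *s) { (void)s; /* nothing to emit */ }

int main(void)
{
    tcg_out_tb_init(NULL);
    tcg_out_tb_finalize(NULL);
    puts("no-op backend hooks ran");
    return 0;
}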
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index c2ecfbe047..6e1da8c007 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -120,6 +120,8 @@
#define TCG_TARGET_HAS_mulsh_i64 0
#endif /* TCG_TARGET_REG_BITS == 64 */
+#define TCG_TARGET_HAS_new_ldst 0
+
/* Number of registers available.
For 32 bit hosts, we need more than 8 registers (call arguments). */
/* #define TCG_TARGET_NB_REGS 8 */
diff --git a/translate-all.c b/translate-all.c
index e7aff928b6..aeda54dfbd 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1318,18 +1318,6 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
mmap_unlock();
}
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* check whether the given addr is in TCG generated code buffer or not */
-bool is_tcg_gen_code(uintptr_t tc_ptr)
-{
- /* This can be called during code generation, code_gen_buffer_size
- is used instead of code_gen_ptr for upper boundary checking */
- return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
- tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
- tcg_ctx.code_gen_buffer_size));
-}
-#endif
-
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)