author     Peter Maydell <peter.maydell@linaro.org>   2014-06-23 18:26:58 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2014-06-23 18:26:58 +0100
commit     7ba48975d342ef4d9251097f24be0db328f36f7d (patch)
tree       08b67d12fe2ea9fea0dea3ce05827e66a7d25a29
parent     9f862687eaf7335deb8955e1c25e1d36b9b3c90c (diff)
parent     a84ac4cbbb4f4705305189ef007a8432b0b9f718 (diff)
download   qemu-7ba48975d342ef4d9251097f24be0db328f36f7d.zip
Merge remote-tracking branch 'remotes/rth/tcg-ppc-merge-1' into staging
* remotes/rth/tcg-ppc-merge-1: (25 commits)
tcg-ppc: Use the return address as a base pointer
tcg-ppc: Merge cache-utils into the backend
qemu/osdep: Remove the need for qemu_init_auxval
tcg-ppc: Rename the tcg/ppc64 backend
tcg-ppc: Remove the backend
tcg-ppc64: Merge ppc32 shifts
tcg-ppc64: Support mulsh_i32
tcg-ppc64: Merge ppc32 register usage
tcg-ppc64: Merge ppc32 qemu_ld/st
tcg-ppc64: Merge ppc32 brcond2, setcond2, muluh
tcg-ppc64: Begin merging ppc32 with ppc64
tcg-ppc64: Fix sub2 implementation
tcg-ppc64: Merge 32-bit ABIs into the prologue / frame code
tcg-ppc64: Adjust tcg_out_call for ELFv2
tcg-ppc64: Support the ppc64 elfv2 ABI
tcg-ppc64: Use the correct test in tcg_out_call
tcg-ppc64: Better parameterize the stack frame
tcg-ppc64: Fix TCG_TARGET_CALL_STACK_OFFSET
tcg-ppc64: Move call macros out of tcg-target.h
tcg-ppc64: Make TCG_AREG0 and TCG_REG_CALL_STACK enum constants
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rwxr-xr-x  configure                  |    2
-rw-r--r--  exec.c                     |    1
-rw-r--r--  include/exec/exec-all.h    |    2
-rw-r--r--  include/qemu/cache-utils.h |   44
-rw-r--r--  include/qemu/osdep.h       |   12
-rw-r--r--  linux-user/main.c          |    4
-rw-r--r--  tcg/ppc/tcg-target.c       | 3124
-rw-r--r--  tcg/ppc/tcg-target.h       |  114
-rw-r--r--  tcg/ppc64/tcg-target.c     | 2186
-rw-r--r--  tcg/ppc64/tcg-target.h     |  131
-rw-r--r--  tcg/tcg.c                  |    1
-rw-r--r--  util/Makefile.objs         |    2
-rw-r--r--  util/cache-utils.c         |   86
-rw-r--r--  util/getauxval.c           |   51
-rw-r--r--  vl.c                       |    4
15 files changed, 2055 insertions, 3709 deletions
@@ -4747,6 +4747,8 @@ elif test "$ARCH" = "s390x" ; then
   QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/s390 $QEMU_INCLUDES"
 elif test "$ARCH" = "x86_64" -o "$ARCH" = "x32" ; then
   QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/i386 $QEMU_INCLUDES"
+elif test "$ARCH" = "ppc64" ; then
+  QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/ppc $QEMU_INCLUDES"
 else
   QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
 fi
@@ -50,7 +50,6 @@
 #include "exec/memory-internal.h"
 #include "exec/ram_addr.h"
-#include "qemu/cache-utils.h"
 #include "qemu/range.h"
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 3d62d9c464..5e5d86ec46 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -224,7 +224,7 @@ static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
     /* no need to flush icache explicitly */
 }
 #elif defined(_ARCH_PPC)
-void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
+void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
 #define tb_set_jmp_target1 ppc_tb_set_jmp_target
 #elif defined(__i386__) || defined(__x86_64__)
 static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
diff --git a/include/qemu/cache-utils.h b/include/qemu/cache-utils.h
deleted file mode 100644
index 211245bea0..0000000000
--- a/include/qemu/cache-utils.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef QEMU_CACHE_UTILS_H
-#define QEMU_CACHE_UTILS_H
-
-#if defined(_ARCH_PPC)
-
-#include <stdint.h> /* uintptr_t */
-
-struct qemu_cache_conf {
-    unsigned long dcache_bsize;
-    unsigned long icache_bsize;
-};
-
-extern struct qemu_cache_conf qemu_cache_conf;
-
-void qemu_cache_utils_init(void);
-
-/* mildly adjusted code from tcg-dyngen.c */
-static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
-{
-    unsigned long p, start1, stop1;
-    unsigned long dsize = qemu_cache_conf.dcache_bsize;
-    unsigned long isize = qemu_cache_conf.icache_bsize;
-
-    start1 = start & ~(dsize - 1);
-    stop1 = (stop + dsize - 1) & ~(dsize - 1);
-    for (p = start1; p < stop1; p += dsize) {
-        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
-    }
-    asm volatile ("sync" : : : "memory");
-
-    start &= start & ~(isize - 1);
-    stop1 = (stop + isize - 1) & ~(isize - 1);
-    for (p = start1; p < stop1; p += isize) {
-        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
-    }
-    asm volatile ("sync" : : : "memory");
-    asm volatile ("isync" : : : "memory");
-}
-
-#else
-#define qemu_cache_utils_init() do { } while (0)
-#endif
-
-#endif /* QEMU_CACHE_UTILS_H */
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 6d35c1bcba..8480d523f0 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -251,18 +251,6 @@ unsigned long qemu_getauxval(unsigned long type);
 static inline unsigned long qemu_getauxval(unsigned long type) { return 0; }
 #endif
 
-/**
- * qemu_init_auxval:
- * @envp: the third argument to main
- *
- * If supported and required, locate the auxiliary vector at program startup.
- */
-#if defined(CONFIG_GETAUXVAL) || !defined(__linux__)
-static inline void qemu_init_auxval(char **envp) { }
-#else
-void qemu_init_auxval(char **envp);
-#endif
-
 void qemu_set_tty_echo(int fd, bool echo);
 
 void os_mem_prealloc(int fd, char *area, size_t sz);
diff --git a/linux-user/main.c b/linux-user/main.c
index a87c6f7ed4..df1bb0e758 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -28,7 +28,6 @@
 #include "qemu.h"
 #include "qemu-common.h"
-#include "qemu/cache-utils.h"
 #include "cpu.h"
 #include "tcg.h"
 #include "qemu/timer.h"
@@ -3829,9 +3828,6 @@ int main(int argc, char **argv, char **envp)
 
     module_call_init(MODULE_INIT_QOM);
 
-    qemu_init_auxval(envp);
-    qemu_cache_utils_init();
-
     if ((envlist = envlist_create()) == NULL) {
         (void) fprintf(stderr, "Unable to allocate envlist\n");
         exit(1);
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index 436b65bffb..c83fd9fdc0 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -24,27 +24,71 @@
 
 #include "tcg-be-ldst.h"
 
-static tcg_insn_unit *tb_ret_addr;
-
 #if defined _CALL_DARWIN || defined __APPLE__
 #define TCG_TARGET_CALL_DARWIN
 #endif
+#ifdef _CALL_SYSV
+# define TCG_TARGET_CALL_ALIGN_ARGS   1
+#endif
 
-#ifdef TCG_TARGET_CALL_DARWIN
-#define LINKAGE_AREA_SIZE 24
-#define LR_OFFSET 8
-#elif defined _CALL_AIX
-#define LINKAGE_AREA_SIZE 52
-#define LR_OFFSET 8
+/* For some memory operations, we need a scratch that isn't R0.  For the AIX
+   calling convention, we can re-use the TOC register since we'll be reloading
+   it at every call.  Otherwise R12 will do nicely as neither a call-saved
+   register nor a parameter register.  */
+#ifdef _CALL_AIX
+# define TCG_REG_TMP1   TCG_REG_R2
 #else
-#define LINKAGE_AREA_SIZE 8
-#define LR_OFFSET 4
+# define TCG_REG_TMP1   TCG_REG_R12
 #endif
 
+/* For the 64-bit target, we don't like the 5 insn sequence needed to build
+   full 64-bit addresses.  Better to have a base register to which we can
+   apply a 32-bit displacement.
+
+   There are generally three items of interest:
+   (1) helper functions in the main executable,
+   (2) TranslationBlock data structures,
+   (3) the return address in the epilogue.
+
+   For user-only, we USE_STATIC_CODE_GEN_BUFFER, so the code_gen_buffer
+   will be inside the main executable, and thus near enough to make a
+   pointer to the epilogue be within 2GB of all helper functions.
+
+   For softmmu, we'll let the kernel choose the address of code_gen_buffer,
+   and odds are it'll be somewhere close to the main malloc arena, and so
+   a pointer to the epilogue will be within 2GB of the TranslationBlocks.
+
+   For --enable-pie, everything will be kinda near everything else,
+   somewhere in high memory.
+
+   Thus we choose to keep the return address in a call-saved register.  */
+#define TCG_REG_RA     TCG_REG_R31
+#define USE_REG_RA     (TCG_TARGET_REG_BITS == 64)
+
+/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
+#define SZP  ((int)sizeof(void *))
+
+/* Shorthand for size of a register.  */
+#define SZR  (TCG_TARGET_REG_BITS / 8)
+
+#define TCG_CT_CONST_S16  0x100
+#define TCG_CT_CONST_U16  0x200
+#define TCG_CT_CONST_S32  0x400
+#define TCG_CT_CONST_U32  0x800
+#define TCG_CT_CONST_ZERO 0x1000
+#define TCG_CT_CONST_MONE 0x2000
+
+static tcg_insn_unit *tb_ret_addr;
+
 #ifndef GUEST_BASE
 #define GUEST_BASE 0
 #endif
 
+#include "elf.h"
+static bool have_isa_2_06;
+#define HAVE_ISA_2_06  have_isa_2_06
+#define HAVE_ISEL      have_isa_2_06
+
 #ifdef CONFIG_USE_GUEST_BASE
 #define TCG_GUEST_BASE_REG 30
 #else
@@ -89,7 +133,7 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #endif
 
 static const int tcg_target_reg_alloc_order[] = {
-    TCG_REG_R14,
+    TCG_REG_R14,  /* call saved registers */
     TCG_REG_R15,
     TCG_REG_R16,
     TCG_REG_R17,
@@ -99,32 +143,26 @@ static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_R21,
     TCG_REG_R22,
     TCG_REG_R23,
+    TCG_REG_R24,
+    TCG_REG_R25,
+    TCG_REG_R26,
+    TCG_REG_R27,
     TCG_REG_R28,
     TCG_REG_R29,
     TCG_REG_R30,
     TCG_REG_R31,
-#ifdef TCG_TARGET_CALL_DARWIN
-    TCG_REG_R2,
-#endif
-    TCG_REG_R3,
-    TCG_REG_R4,
-    TCG_REG_R5,
-    TCG_REG_R6,
-    TCG_REG_R7,
-    TCG_REG_R8,
-    TCG_REG_R9,
-    TCG_REG_R10,
-#ifndef TCG_TARGET_CALL_DARWIN
+    TCG_REG_R12,  /* call clobbered, non-arguments */
     TCG_REG_R11,
-#endif
-    TCG_REG_R12,
-#ifndef _CALL_SYSV
+    TCG_REG_R2,
     TCG_REG_R13,
-#endif
-    TCG_REG_R24,
-    TCG_REG_R25,
-    TCG_REG_R26,
-    TCG_REG_R27
+    TCG_REG_R10,  /* call clobbered, arguments */
+    TCG_REG_R9,
+    TCG_REG_R8,
+    TCG_REG_R7,
+    TCG_REG_R6,
+    TCG_REG_R5,
+    TCG_REG_R4,
+    TCG_REG_R3,
 };
 
 static const int tcg_target_call_iarg_regs[] = {
@@ -138,7 +176,7 @@ static const int tcg_target_call_iarg_regs[] = {
     TCG_REG_R10
 };
 
-static const int tcg_target_call_oarg_regs[2] = {
+static const int tcg_target_call_oarg_regs[] = {
     TCG_REG_R3,
     TCG_REG_R4
 };
@@ -146,10 +184,6 @@ static const int tcg_target_call_oarg_regs[2] = {
 static const int tcg_target_callee_save_regs[] = {
 #ifdef TCG_TARGET_CALL_DARWIN
     TCG_REG_R11,
-    TCG_REG_R13,
-#endif
-#ifdef _CALL_AIX
-    TCG_REG_R13,
 #endif
     TCG_REG_R14,
     TCG_REG_R15,
@@ -173,7 +207,7 @@ static const int tcg_target_callee_save_regs[] = {
 
 static inline bool in_range_b(tcg_target_long target)
 {
-    return target == sextract32(target, 0, 26);
+    return target == sextract64(target, 0, 26);
 }
 
 static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
@@ -200,6 +234,18 @@ static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
     *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target);
 }
 
+static inline void tcg_out_b_noaddr(TCGContext *s, int insn)
+{
+    unsigned retrans = *s->code_ptr & 0x3fffffc;
+    tcg_out32(s, insn | retrans);
+}
+
+static inline void tcg_out_bc_noaddr(TCGContext *s, int insn)
+{
+    unsigned retrans = *s->code_ptr & 0xfffc;
+    tcg_out32(s, insn | retrans);
+}
+
 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                         intptr_t value, intptr_t addend)
 {
@@ -233,59 +279,43 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
         ct->ct |= TCG_CT_REG;
         tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
         break;
-#ifdef CONFIG_SOFTMMU
     case 'L':                   /* qemu_ld constraint */
         ct->ct |= TCG_CT_REG;
         tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
-#if TARGET_LONG_BITS == 64
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
-#ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
-#endif
 #endif
         break;
-    case 'K':                   /* qemu_st[8..32] constraint */
+    case 'S':                   /* qemu_st constraint */
         ct->ct |= TCG_CT_REG;
         tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+#ifdef CONFIG_SOFTMMU
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
-#if TARGET_LONG_BITS == 64
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
-#ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
-#endif
 #endif
         break;
-    case 'M':                   /* qemu_st64 constraint */
-        ct->ct |= TCG_CT_REG;
-        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
-#ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R9);
-#endif
+    case 'I':
+        ct->ct |= TCG_CT_CONST_S16;
         break;
-#else
-    case 'L':
-    case 'K':
-        ct->ct |= TCG_CT_REG;
-        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+    case 'J':
+        ct->ct |= TCG_CT_CONST_U16;
         break;
     case 'M':
-        ct->ct |= TCG_CT_REG;
-        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
-        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+        ct->ct |= TCG_CT_CONST_MONE;
+        break;
+    case 'T':
+        ct->ct |= TCG_CT_CONST_S32;
+        break;
+    case 'U':
+        ct->ct |= TCG_CT_CONST_U32;
+        break;
+    case 'Z':
+        ct->ct |= TCG_CT_CONST_ZERO;
         break;
-#endif
     default:
         return -1;
     }
@@ -298,48 +328,86 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct)
 {
-    int ct;
+    int ct = arg_ct->ct;
+    if (ct & TCG_CT_CONST) {
+        return 1;
+    }
+
+    /* The only 32-bit constraint we use aside from
+       TCG_CT_CONST is TCG_CT_CONST_S16.  */
+    if (type == TCG_TYPE_I32) {
+        val = (int32_t)val;
+    }
 
-    ct = arg_ct->ct;
-    if (ct & TCG_CT_CONST)
+    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
+        return 1;
+    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
+        return 1;
+    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
+        return 1;
+    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
+        return 1;
+    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
         return 1;
+    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
+        return 1;
+    }
     return 0;
 }
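Reviewer note (not part of the patch): each constraint above is a simple truncate-and-compare range test. A standalone C sketch of the same idiom, to make the sign/zero-extension behaviour explicit:

    #include <stdint.h>
    #include <assert.h>

    typedef int64_t tcg_target_long;   /* stand-in for the TCG type */

    static int fits_s16(tcg_target_long v) { return v == (int16_t)v; }
    static int fits_u16(tcg_target_long v) { return v == (uint16_t)v; }
    static int fits_s32(tcg_target_long v) { return v == (int32_t)v; }
    static int fits_u32(tcg_target_long v) { return v == (uint32_t)v; }

    int main(void)
    {
        assert(fits_s16(-0x8000) && !fits_s16(0x8000));
        assert(fits_u16(0xffff) && !fits_u16(-1));
        /* sign matters: -1 sign-extends, so it is S32 but not U32 */
        assert(fits_s32(-1) && !fits_u32(-1));
        return 0;
    }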
 
 #define OPCD(opc) ((opc)<<26)
-#define XO31(opc) (OPCD(31)|((opc)<<1))
 #define XO19(opc) (OPCD(19)|((opc)<<1))
-
-#define B      OPCD(18)
-#define BC     OPCD(16)
-#define LBZ    OPCD(34)
-#define LHZ    OPCD(40)
-#define LHA    OPCD(42)
-#define LWZ    OPCD(32)
-#define STB    OPCD(38)
-#define STH    OPCD(44)
-#define STW    OPCD(36)
-
-#define ADDIC  OPCD(12)
-#define ADDI   OPCD(14)
-#define ADDIS  OPCD(15)
-#define ORI    OPCD(24)
-#define ORIS   OPCD(25)
-#define XORI   OPCD(26)
-#define XORIS  OPCD(27)
-#define ANDI   OPCD(28)
-#define ANDIS  OPCD(29)
-#define MULLI  OPCD( 7)
-#define CMPLI  OPCD(10)
-#define CMPI   OPCD(11)
+#define MD30(opc)  (OPCD(30)|((opc)<<2))
+#define MDS30(opc) (OPCD(30)|((opc)<<1))
+#define XO31(opc)  (OPCD(31)|((opc)<<1))
+#define XO58(opc)  (OPCD(58)|(opc))
+#define XO62(opc)  (OPCD(62)|(opc))
+
+#define B      OPCD( 18)
+#define BC     OPCD( 16)
+#define LBZ    OPCD( 34)
+#define LHZ    OPCD( 40)
+#define LHA    OPCD( 42)
+#define LWZ    OPCD( 32)
+#define STB    OPCD( 38)
+#define STH    OPCD( 44)
+#define STW    OPCD( 36)
+
+#define STD    XO62(  0)
+#define STDU   XO62(  1)
+#define STDX   XO31(149)
+
+#define LD     XO58(  0)
+#define LDX    XO31( 21)
+#define LDU    XO58(  1)
+#define LWA    XO58(  2)
+#define LWAX   XO31(341)
+
+#define ADDIC  OPCD( 12)
+#define ADDI   OPCD( 14)
+#define ADDIS  OPCD( 15)
+#define ORI    OPCD( 24)
+#define ORIS   OPCD( 25)
+#define XORI   OPCD( 26)
+#define XORIS  OPCD( 27)
+#define ANDI   OPCD( 28)
+#define ANDIS  OPCD( 29)
+#define MULLI  OPCD(  7)
+#define CMPLI  OPCD( 10)
+#define CMPI   OPCD( 11)
 #define SUBFIC OPCD( 8)
 
-#define LWZU   OPCD(33)
-#define STWU   OPCD(37)
+#define LWZU   OPCD( 33)
+#define STWU   OPCD( 37)
+
+#define RLWIMI OPCD( 20)
+#define RLWINM OPCD( 21)
+#define RLWNM  OPCD( 23)
 
-#define RLWIMI OPCD(20)
-#define RLWINM OPCD(21)
-#define RLWNM  OPCD(23)
+#define RLDICL MD30(  0)
+#define RLDICR MD30(  1)
+#define RLDIMI MD30(  3)
+#define RLDCL  MDS30( 8)
 
 #define BCLR   XO19( 16)
 #define BCCTR  XO19(528)
@@ -351,16 +419,22 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
 
 #define EXTSB  XO31(954)
 #define EXTSH  XO31(922)
+#define EXTSW  XO31(986)
 #define ADD    XO31(266)
 #define ADDE   XO31(138)
+#define ADDME  XO31(234)
+#define ADDZE  XO31(202)
 #define ADDC   XO31( 10)
 #define AND    XO31( 28)
 #define SUBF   XO31( 40)
 #define SUBFC  XO31(  8)
 #define SUBFE  XO31(136)
+#define SUBFME XO31(232)
+#define SUBFZE XO31(200)
 #define OR     XO31(444)
 #define XOR    XO31(316)
 #define MULLW  XO31(235)
+#define MULHW  XO31( 75)
 #define MULHWU XO31( 11)
 #define DIVW   XO31(491)
 #define DIVWU  XO31(459)
@@ -368,21 +442,31 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
 #define CMPL   XO31( 32)
 #define LHBRX  XO31(790)
 #define LWBRX  XO31(534)
+#define LDBRX  XO31(532)
 #define STHBRX XO31(918)
 #define STWBRX XO31(662)
+#define STDBRX XO31(660)
 #define MFSPR  XO31(339)
 #define MTSPR  XO31(467)
 #define SRAWI  XO31(824)
 #define NEG    XO31(104)
 #define MFCR   XO31( 19)
-#define CNTLZW XO31( 26)
+#define MFOCRF (MFCR | (1u << 20))
 #define NOR    XO31(124)
+#define CNTLZW XO31( 26)
+#define CNTLZD XO31( 58)
 #define ANDC   XO31( 60)
 #define ORC    XO31(412)
 #define EQV    XO31(284)
 #define NAND   XO31(476)
 #define ISEL   XO31( 15)
 
+#define MULLD  XO31(233)
+#define MULHD  XO31( 73)
+#define MULHDU XO31(  9)
+#define DIVD   XO31(489)
+#define DIVDU  XO31(457)
+
 #define LBZX   XO31( 87)
 #define LHZX   XO31(279)
 #define LHAX   XO31(343)
@@ -391,7 +475,7 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
 #define STHX   XO31(407)
 #define STWX   XO31(151)
 
-#define SPR(a,b) ((((a)<<5)|(b))<<11)
+#define SPR(a, b) ((((a)<<5)|(b))<<11)
 #define LR     SPR(8, 0)
 #define CTR    SPR(9, 0)
 
@@ -399,8 +483,15 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
 #define SRW    XO31(536)
 #define SRAW   XO31(792)
 
-#define TW     XO31(4)
-#define TRAP   (TW | TO (31))
+#define SLD    XO31( 27)
+#define SRD    XO31(539)
+#define SRAD   XO31(794)
+#define SRADI  XO31(413<<1)
+
+#define TW     XO31( 4)
+#define TRAP   (TW | TO(31))
+
+#define NOP    ORI  /* ori 0,0,0 */
 
 #define RT(r)  ((r)<<21)
 #define RS(r)  ((r)<<21)
@@ -411,21 +502,26 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
 #define MB(b)  ((b)<<6)
 #define ME(e)  ((e)<<1)
 #define BO(o)  ((o)<<21)
+#define MB64(b) ((b)<<5)
+#define FXM(b) (1 << (19 - (b)))
 
 #define LK    1
 
-#define TAB(t,a,b) (RT(t) | RA(a) | RB(b))
-#define SAB(s,a,b) (RS(s) | RA(a) | RB(b))
+#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
+#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
+#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
+#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
 
 #define BF(n)    ((n)<<23)
 #define BI(n, c) (((c)+((n)*4))<<16)
 #define BT(n, c) (((c)+((n)*4))<<21)
 #define BA(n, c) (((c)+((n)*4))<<16)
 #define BB(n, c) (((c)+((n)*4))<<11)
+#define BC_(n, c) (((c)+((n)*4))<<6)
 
-#define BO_COND_TRUE  BO (12)
-#define BO_COND_FALSE BO (4)
-#define BO_ALWAYS     BO (20)
+#define BO_COND_TRUE  BO(12)
+#define BO_COND_FALSE BO( 4)
+#define BO_ALWAYS     BO(20)
 
 enum {
     CR_LT,
@@ -435,557 +531,348 @@ enum {
 };
 
 static const uint32_t tcg_to_bc[] = {
-    [TCG_COND_EQ]  = BC | BI (7, CR_EQ) | BO_COND_TRUE,
-    [TCG_COND_NE]  = BC | BI (7, CR_EQ) | BO_COND_FALSE,
-    [TCG_COND_LT]  = BC | BI (7, CR_LT) | BO_COND_TRUE,
-    [TCG_COND_GE]  = BC | BI (7, CR_LT) | BO_COND_FALSE,
-    [TCG_COND_LE]  = BC | BI (7, CR_GT) | BO_COND_FALSE,
-    [TCG_COND_GT]  = BC | BI (7, CR_GT) | BO_COND_TRUE,
-    [TCG_COND_LTU] = BC | BI (7, CR_LT) | BO_COND_TRUE,
-    [TCG_COND_GEU] = BC | BI (7, CR_LT) | BO_COND_FALSE,
-    [TCG_COND_LEU] = BC | BI (7, CR_GT) | BO_COND_FALSE,
-    [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
+    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
+    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
+    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
+    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
+    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
+    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
+    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
+    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
+    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
 };
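Reviewer note (not part of the patch): the BI/BO field macros above decompose mechanically. A small hypothetical self-check, recomputing one tcg_to_bc entry from the same shift amounts as the #defines:

    #include <stdint.h>
    #include <assert.h>

    enum { CR_LT, CR_GT, CR_EQ, CR_SO };
    #define OPCD(opc)  ((uint32_t)(opc) << 26)
    #define BC         OPCD(16)
    #define BO(o)      ((uint32_t)(o) << 21)
    #define BI(n, c)   (((uint32_t)(c) + ((n) * 4)) << 16)

    int main(void)
    {
        /* TCG_COND_EQ tests bit CR_EQ of cr7: BI(7, CR_EQ) = (2 + 28) << 16. */
        assert(BI(7, CR_EQ) == (30u << 16));
        assert((BC | BI(7, CR_EQ) | BO(12))
               == (OPCD(16) | (30u << 16) | (12u << 21)));
        return 0;
    }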
+/* The low bit here is set if the RA and RB fields must be inverted.  */
+static const uint32_t tcg_to_isel[] = {
+    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
+    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
+    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
+    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
+    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
+    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
+    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
+    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
+    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
+    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
+};
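Reviewer note (not part of the patch): isel picks its first operand when the tested CR bit is set, so conditions that must test the opposite sense of a bit (NE, GE, ...) carry a set low bit, and consumers swap their true/false operands before emitting. A hypothetical C mirror of that convention, as tcg_out_movcond does below:

    /* isel-style select: take 'vtrue' when the tested CR bit is set. */
    static long pick(int cr_bit_set, long vtrue, long vfalse,
                     unsigned isel_low_bit)
    {
        if (isel_low_bit) {        /* inverted sense: swap the operands */
            long t = vtrue;
            vtrue = vfalse;
            vfalse = t;
        }
        return cr_bit_set ? vtrue : vfalse;
    }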
+
+static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
+                             TCGReg base, tcg_target_long offset);
+
 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
     if (ret != arg) {
         tcg_out32(s, OR | SAB(arg, ret, arg));
     }
 }
 
-static void tcg_out_movi(TCGContext *s, TCGType type,
-                         TCGReg ret, tcg_target_long arg)
+static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
+                               int sh, int mb)
 {
-    if (arg == (int16_t) arg)
-        tcg_out32 (s, ADDI | RT (ret) | RA (0) | (arg & 0xffff));
-    else {
-        tcg_out32 (s, ADDIS | RT (ret) | RA (0) | ((arg >> 16) & 0xffff));
-        if (arg & 0xffff)
-            tcg_out32 (s, ORI | RS (ret) | RA (ret) | (arg & 0xffff));
-    }
+    assert(TCG_TARGET_REG_BITS == 64);
+    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
+    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
+    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
 }
 
-static void tcg_out_ldst (TCGContext *s, int ret, int addr,
-                          int offset, int op1, int op2)
+static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
+                               int sh, int mb, int me)
 {
-    if (offset == (int16_t) offset)
-        tcg_out32 (s, op1 | RT (ret) | RA (addr) | (offset & 0xffff));
-    else {
-        tcg_out_movi (s, TCG_TYPE_I32, 0, offset);
-        tcg_out32 (s, op2 | RT (ret) | RA (addr) | RB (0));
-    }
+    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
 }
 
-static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
+static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
 {
-    ptrdiff_t disp = tcg_pcrel_diff(s, target);
-    if (in_range_b(disp)) {
-        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
-    } else {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
-        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
-        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
-    }
+    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
 }
 
-static void tcg_out_call1(TCGContext *s, tcg_insn_unit *target, int lk)
+static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
-#ifdef _CALL_AIX
-    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, (uintptr_t)target);
-    tcg_out32(s, LWZ | RT(TCG_REG_R0) | RA(reg));
-    tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
-    tcg_out32(s, LWZ | RT(TCG_REG_R2) | RA(reg) | 4);
-    tcg_out32(s, BCCTR | BO_ALWAYS | lk);
-#else
-    tcg_out_b(s, lk, target);
-#endif
+    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
 }
 
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
+static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
-    tcg_out_call1(s, target, LK);
+    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
 }
 
-#if defined(CONFIG_SOFTMMU)
-
-static void add_qemu_ldst_label (TCGContext *s,
-                                 bool is_ld,
-                                 TCGMemOp opc,
-                                 int data_reg,
-                                 int data_reg2,
-                                 int addrlo_reg,
-                                 int addrhi_reg,
-                                 int mem_index,
-                                 tcg_insn_unit *raddr,
-                                 tcg_insn_unit *label_ptr)
+static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
-    TCGLabelQemuLdst *label = new_ldst_label(s);
-
-    label->is_ld = is_ld;
-    label->opc = opc;
-    label->datalo_reg = data_reg;
-    label->datahi_reg = data_reg2;
-    label->addrlo_reg = addrlo_reg;
-    label->addrhi_reg = addrhi_reg;
-    label->mem_index = mem_index;
-    label->raddr = raddr;
-    label->label_ptr[0] = label_ptr;
+    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
 }
 
-/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
- *                                     int mmu_idx, uintptr_t ra)
- */
-static void * const qemu_ld_helpers[16] = {
-    [MO_UB]   = helper_ret_ldub_mmu,
-    [MO_LEUW] = helper_le_lduw_mmu,
-    [MO_LEUL] = helper_le_ldul_mmu,
-    [MO_LEQ]  = helper_le_ldq_mmu,
-    [MO_BEUW] = helper_be_lduw_mmu,
-    [MO_BEUL] = helper_be_ldul_mmu,
-    [MO_BEQ]  = helper_be_ldq_mmu,
-};
-
-/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
- *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
- */
-static void * const qemu_st_helpers[16] = {
-    [MO_UB]   = helper_ret_stb_mmu,
-    [MO_LEUW] = helper_le_stw_mmu,
-    [MO_LEUL] = helper_le_stl_mmu,
-    [MO_LEQ]  = helper_le_stq_mmu,
-    [MO_BEUW] = helper_be_stw_mmu,
-    [MO_BEUL] = helper_be_stl_mmu,
-    [MO_BEQ]  = helper_be_stq_mmu,
-};
-
-static tcg_insn_unit *ld_trampolines[16];
-static tcg_insn_unit *st_trampolines[16];
-
-/* Perform the TLB load and compare.  Branches to the slow path, placing the
-   address of the branch in *LABEL_PTR.  Loads the addend of the TLB into R0.
-   Clobbers R1 and R2.  */
-
-static void tcg_out_tlb_check(TCGContext *s, TCGReg r0, TCGReg r1, TCGReg r2,
-                              TCGReg addrlo, TCGReg addrhi, TCGMemOp s_bits,
-                              int mem_index, int is_load,
-                              tcg_insn_unit **label_ptr)
+static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
-    int cmp_off =
-        (is_load
-         ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
-         : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
-    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
-    tcg_insn_unit retranst;
-    TCGReg base = TCG_AREG0;
-
-    /* Extract the page index, shifted into place for tlb index.  */
-    tcg_out32(s, (RLWINM
-                  | RA(r0)
-                  | RS(addrlo)
-                  | SH(32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
-                  | MB(32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS))
-                  | ME(31 - CPU_TLB_ENTRY_BITS)));
-
-    /* Compensate for very large offsets.  */
-    if (add_off >= 0x8000) {
-        /* Most target env are smaller than 32k; none are larger than 64k.
-           Simplify the logic here merely to offset by 0x7ff0, giving us a
-           range just shy of 64k.  Check this assumption.  */
-        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
-                                   tlb_table[NB_MMU_MODES - 1][1])
-                          > 0x7ff0 + 0x7fff);
-        tcg_out32(s, ADDI | RT(r1) | RA(base) | 0x7ff0);
-        base = r1;
-        cmp_off -= 0x7ff0;
-        add_off -= 0x7ff0;
-    }
-
-    /* Clear the non-page, non-alignment bits from the address.  */
-    tcg_out32(s, (RLWINM
-                  | RA(r2)
-                  | RS(addrlo)
-                  | SH(0)
-                  | MB((32 - s_bits) & 31)
-                  | ME(31 - TARGET_PAGE_BITS)));
-
-    tcg_out32(s, ADD | RT(r0) | RA(r0) | RB(base));
-    base = r0;
-
-    /* Load the tlb comparator.  */
-    tcg_out32(s, LWZ | RT(r1) | RA(base) | (cmp_off & 0xffff));
-
-    tcg_out32(s, CMP | BF(7) | RA(r2) | RB(r1));
-
-    if (TARGET_LONG_BITS == 64) {
-        tcg_out32(s, LWZ | RT(r1) | RA(base) | ((cmp_off + 4) & 0xffff));
-    }
-
-    /* Load the tlb addend for use on the fast path.
-       Do this asap to minimize load delay.  */
-    tcg_out32(s, LWZ | RT(r0) | RA(base) | (add_off & 0xffff));
+    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
+}
 
-    if (TARGET_LONG_BITS == 64) {
-        tcg_out32(s, CMP | BF(6) | RA(addrhi) | RB(r1));
-        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
+static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
+{
+    if (arg == (int16_t) arg) {
+        tcg_out32(s, ADDI | TAI(ret, 0, arg));
+    } else {
+        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
+        if (arg & 0xffff) {
+            tcg_out32(s, ORI | SAI(ret, ret, arg));
+        }
     }
-
-    /* Use a conditional branch-and-link so that we load a pointer to
-       somewhere within the current opcode, for passing on to the helper.
-       This address cannot be used for a tail call, but it's shorter
-       than forming an address from scratch.  */
-    *label_ptr = s->code_ptr;
-    retranst = *s->code_ptr & 0xfffc;
-    tcg_out32(s, BC | BI(7, CR_EQ) | retranst | BO_COND_FALSE | LK);
 }
-#endif
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
+                         tcg_target_long arg)
 {
-    TCGReg addrlo, datalo, datahi, rbase, addrhi __attribute__((unused));
-    TCGMemOp opc, bswap;
-#ifdef CONFIG_SOFTMMU
-    int mem_index;
-    tcg_insn_unit *label_ptr;
-#endif
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
+    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
+        tcg_out_movi32(s, ret, arg);
+    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
+        tcg_out32(s, ADDI | TAI(ret, 0, arg));
+        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
+    } else {
+        int32_t high;
 
-    datalo = *args++;
-    datahi = (is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    opc = *args++;
-    bswap = opc & MO_BSWAP;
-
-#ifdef CONFIG_SOFTMMU
-    mem_index = *args;
-    tcg_out_tlb_check(s, TCG_REG_R3, TCG_REG_R4, TCG_REG_R0, addrlo,
-                      addrhi, opc & MO_SIZE, mem_index, 0, &label_ptr);
-    rbase = TCG_REG_R3;
-#else  /* !CONFIG_SOFTMMU */
-    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
-#endif
+        if (USE_REG_RA) {
+            intptr_t diff = arg - (intptr_t)tb_ret_addr;
+            if (diff == (int32_t)diff) {
+                tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_RA, diff);
+                return;
+            }
+        }
 
-    switch (opc & MO_SSIZE) {
-    default:
-    case MO_UB:
-        tcg_out32(s, LBZX | TAB(datalo, rbase, addrlo));
-        break;
-    case MO_SB:
-        tcg_out32(s, LBZX | TAB(datalo, rbase, addrlo));
-        tcg_out32(s, EXTSB | RA(datalo) | RS(datalo));
-        break;
-    case MO_UW:
-        tcg_out32(s, (bswap ? LHBRX : LHZX) | TAB(datalo, rbase, addrlo));
-        break;
-    case MO_SW:
-        if (bswap) {
-            tcg_out32(s, LHBRX | TAB(datalo, rbase, addrlo));
-            tcg_out32(s, EXTSH | RA(datalo) | RS(datalo));
-        } else {
-            tcg_out32(s, LHAX | TAB(datalo, rbase, addrlo));
+        high = arg >> 31 >> 1;
+        tcg_out_movi32(s, ret, high);
+        if (high) {
+            tcg_out_shli64(s, ret, ret, 32);
         }
-        break;
-    case MO_UL:
-        tcg_out32(s, (bswap ? LWBRX : LWZX) | TAB(datalo, rbase, addrlo));
-        break;
-    case MO_Q:
-        if (bswap) {
-            tcg_out32(s, ADDI | RT(TCG_REG_R0) | RA(addrlo) | 4);
-            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
-            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
-        } else if (rbase != 0) {
-            tcg_out32(s, ADDI | RT(TCG_REG_R0) | RA(addrlo) | 4);
-            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
-            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
-        } else if (addrlo == datahi) {
-            tcg_out32(s, LWZ | RT(datalo) | RA(addrlo) | 4);
-            tcg_out32(s, LWZ | RT(datahi) | RA(addrlo));
-        } else {
-            tcg_out32(s, LWZ | RT(datahi) | RA(addrlo));
-            tcg_out32(s, LWZ | RT(datalo) | RA(addrlo) | 4);
+        if (arg & 0xffff0000) {
+            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
+        }
+        if (arg & 0xffff) {
+            tcg_out32(s, ORI | SAI(ret, ret, arg));
         }
-        break;
     }
-#ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo,
-                        addrhi, mem_index, s->code_ptr, label_ptr);
-#endif
 }
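Reviewer note (not part of the patch): the general 64-bit constant path above can be checked in isolation. The sketch below mirrors only the final else branch (it ignores the USE_REG_RA shortcut); arg >> 31 >> 1 stands in for arg >> 32 so the shift count stays legal when tcg_target_long is 32 bits:

    #include <stdint.h>
    #include <assert.h>

    /* movi32(high), shift left 32, then ORIS/ORI the low halves. */
    static uint64_t build64(int64_t arg)
    {
        int32_t high = arg >> 31 >> 1;
        uint64_t ret = (int64_t)high;      /* tcg_out_movi32(ret, high) */
        if (high) {
            ret <<= 32;                    /* tcg_out_shli64 */
        }
        if (arg & 0xffff0000) {
            ret |= (arg & 0xffff0000);     /* ORIS */
        }
        if (arg & 0xffff) {
            ret |= (arg & 0xffff);         /* ORI */
        }
        return ret;
    }

    int main(void)
    {
        assert(build64(0x123456789abcdef0LL) == UINT64_C(0x123456789abcdef0));
        return 0;
    }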
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static bool mask_operand(uint32_t c, int *mb, int *me)
 {
-    TCGReg addrlo, datalo, datahi, rbase, addrhi __attribute__((unused));
-    TCGMemOp opc, bswap, s_bits;
-#ifdef CONFIG_SOFTMMU
-    int mem_index;
-    tcg_insn_unit *label_ptr;
-#endif
+    uint32_t lsb, test;
+
+    /* Accept a bit pattern like:
+           0....01....1
+           1....10....0
+           0..01..10..0
+       Keep track of the transitions.  */
+    if (c == 0 || c == -1) {
+        return false;
+    }
+    test = c;
+    lsb = test & -test;
+    test += lsb;
+    if (test & (test - 1)) {
+        return false;
+    }
 
-    datalo = *args++;
-    datahi = (is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    opc = *args++;
-    bswap = opc & MO_BSWAP;
-    s_bits = opc & MO_SIZE;
+    *me = clz32(lsb);
+    *mb = test ? clz32(test & -test) + 1 : 0;
+    return true;
+}
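Reviewer note (not part of the patch): the trick in mask_operand is the power-of-two test applied twice: isolate the lowest set bit, add it so it ripples through the run of ones, and require at most one bit to remain. A self-contained illustration:

    #include <stdint.h>
    #include <assert.h>

    /* True iff c is a single contiguous run of ones (matching the
       0....01....1 / 1....10....0 / 0..01..10..0 cases above). */
    static int is_contiguous_mask(uint32_t c)
    {
        uint32_t lsb, test;

        if (c == 0 || c == ~0u) {
            return 0;
        }
        lsb = c & -c;                      /* lowest set bit */
        test = c + lsb;                    /* ripple through the run */
        return (test & (test - 1)) == 0;   /* at most one bit left */
    }

    int main(void)
    {
        assert(is_contiguous_mask(0x00ff0000));
        assert(is_contiguous_mask(0x0000000e));
        assert(!is_contiguous_mask(0x00f00f00));   /* two runs: rejected */
        return 0;
    }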
 
-#ifdef CONFIG_SOFTMMU
-    mem_index = *args;
-    tcg_out_tlb_check(s, TCG_REG_R3, TCG_REG_R4, TCG_REG_R0, addrlo,
-                      addrhi, s_bits, mem_index, 0, &label_ptr);
-    rbase = TCG_REG_R3;
-#else  /* !CONFIG_SOFTMMU */
-    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
-#endif
+static bool mask64_operand(uint64_t c, int *mb, int *me)
+{
+    uint64_t lsb;
 
-    switch (s_bits) {
-    case MO_8:
-        tcg_out32(s, STBX | SAB(datalo, rbase, addrlo));
-        break;
-    case MO_16:
-        tcg_out32(s, (bswap ? STHBRX : STHX) | SAB(datalo, rbase, addrlo));
-        break;
-    case MO_32:
-    default:
-        tcg_out32(s, (bswap ? STWBRX : STWX) | SAB(datalo, rbase, addrlo));
-        break;
-    case MO_64:
-        if (bswap) {
-            tcg_out32(s, ADDI | RT(TCG_REG_R0) | RA(addrlo) | 4);
-            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
-            tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
-        } else if (rbase != 0) {
-            tcg_out32(s, ADDI | RT(TCG_REG_R0) | RA(addrlo) | 4);
-            tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
-            tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
-        } else {
-            tcg_out32(s, STW | RS(datahi) | RA(addrlo));
-            tcg_out32(s, STW | RS(datalo) | RA(addrlo) | 4);
-        }
-        break;
+    if (c == 0) {
+        return false;
     }
-#ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
-                        mem_index, s->code_ptr, label_ptr);
-#endif
-}
+    lsb = c & -c;
+    /* Accept 1..10..0.  */
+    if (c == -lsb) {
+        *mb = 0;
+        *me = clz64(lsb);
+        return true;
+    }
+    /* Accept 0..01..1.  */
+    if (lsb == 1 && (c & (c + 1)) == 0) {
+        *mb = clz64(c + 1) + 1;
+        *me = 63;
+        return true;
+    }
+    return false;
+}
 
-#if defined(CONFIG_SOFTMMU)
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
 {
-    TCGReg ir, datalo, datahi;
-    TCGMemOp opc = l->opc;
+    int mb, me;
 
-    reloc_pc14(l->label_ptr[0], s->code_ptr);
-
-    ir = TCG_REG_R4;
-    if (TARGET_LONG_BITS == 32) {
-        tcg_out_mov(s, TCG_TYPE_I32, ir++, l->addrlo_reg);
+    if ((c & 0xffff) == c) {
+        tcg_out32(s, ANDI | SAI(src, dst, c));
+        return;
+    } else if ((c & 0xffff0000) == c) {
+        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
+        return;
+    } else if (mask_operand(c, &mb, &me)) {
+        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
     } else {
-#ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        ir |= 1;
-#endif
-        tcg_out_mov(s, TCG_TYPE_I32, ir++, l->addrhi_reg);
-        tcg_out_mov(s, TCG_TYPE_I32, ir++, l->addrlo_reg);
+        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
+        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
     }
-    tcg_out_movi(s, TCG_TYPE_I32, ir++, l->mem_index);
-    tcg_out32(s, MFSPR | RT(ir++) | LR);
-    tcg_out_b(s, LK, ld_trampolines[opc & ~MO_SIGN]);
+}
 
-    datalo = l->datalo_reg;
-    switch (opc & MO_SSIZE) {
-    case MO_SB:
-        tcg_out32(s, EXTSB | RA(datalo) | RS(TCG_REG_R3));
-        break;
-    case MO_SW:
-        tcg_out32(s, EXTSH | RA(datalo) | RS(TCG_REG_R3));
-        break;
-    default:
-        tcg_out_mov(s, TCG_TYPE_I32, datalo, TCG_REG_R3);
-        break;
-    case MO_Q:
-        datahi = l->datahi_reg;
-        if (datalo != TCG_REG_R3) {
-            tcg_out_mov(s, TCG_TYPE_I32, datalo, TCG_REG_R4);
-            tcg_out_mov(s, TCG_TYPE_I32, datahi, TCG_REG_R3);
-        } else if (datahi != TCG_REG_R4) {
-            tcg_out_mov(s, TCG_TYPE_I32, datahi, TCG_REG_R3);
-            tcg_out_mov(s, TCG_TYPE_I32, datalo, TCG_REG_R4);
+static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
+{
+    int mb, me;
+
+    assert(TCG_TARGET_REG_BITS == 64);
+    if ((c & 0xffff) == c) {
+        tcg_out32(s, ANDI | SAI(src, dst, c));
+        return;
+    } else if ((c & 0xffff0000) == c) {
+        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
+        return;
+    } else if (mask64_operand(c, &mb, &me)) {
+        if (mb == 0) {
+            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, TCG_REG_R4);
-            tcg_out_mov(s, TCG_TYPE_I32, datahi, TCG_REG_R3);
-            tcg_out_mov(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
+            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
-        break;
+    } else {
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
+        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
     }
-    tcg_out_b(s, 0, l->raddr);
 }
 
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
+                           int op_lo, int op_hi)
 {
-    TCGReg ir, datalo;
-    TCGMemOp opc = l->opc;
-
-    reloc_pc14(l->label_ptr[0], s->code_ptr);
-
-    ir = TCG_REG_R4;
-    if (TARGET_LONG_BITS == 32) {
-        tcg_out_mov (s, TCG_TYPE_I32, ir++, l->addrlo_reg);
-    } else {
-#ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        ir |= 1;
-#endif
-        tcg_out_mov (s, TCG_TYPE_I32, ir++, l->addrhi_reg);
-        tcg_out_mov (s, TCG_TYPE_I32, ir++, l->addrlo_reg);
+    if (c >> 16) {
+        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
+        src = dst;
     }
-
-    datalo = l->datalo_reg;
-    switch (opc & MO_SIZE) {
-    case MO_8:
-        tcg_out32(s, (RLWINM | RA (ir) | RS (datalo)
-                      | SH (0) | MB (24) | ME (31)));
-        break;
-    case MO_16:
-        tcg_out32(s, (RLWINM | RA (ir) | RS (datalo)
-                      | SH (0) | MB (16) | ME (31)));
-        break;
-    default:
-        tcg_out_mov(s, TCG_TYPE_I32, ir, datalo);
-        break;
-    case MO_64:
-#ifdef
-#ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        ir |= 1;
-#endif
-        tcg_out_mov(s, TCG_TYPE_I32, ir++, l->datahi_reg);
-        tcg_out_mov(s, TCG_TYPE_I32, ir, datalo);
-        break;
+    if (c & 0xffff) {
+        tcg_out32(s, op_lo | SAI(src, dst, c));
+        src = dst;
     }
-    ir++;
-
-    tcg_out_movi(s, TCG_TYPE_I32, ir++, l->mem_index);
-    tcg_out32(s, MFSPR | RT(ir++) | LR);
-    tcg_out_b(s, LK, st_trampolines[opc]);
-    tcg_out_b(s, 0, l->raddr);
 }
-#endif
 
-#ifdef CONFIG_SOFTMMU
-static void emit_ldst_trampoline(TCGContext *s, tcg_insn_unit *ptr)
+static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
 {
-    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0);
-    tcg_out_call1(s, ptr, 0);
+    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
 }
-#endif
 
-static void tcg_target_qemu_prologue (TCGContext *s)
+static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
 {
-    int i, frame_size;
-
-    frame_size = 0
-        + LINKAGE_AREA_SIZE
-        + TCG_STATIC_CALL_ARGS_SIZE
-        + ARRAY_SIZE (tcg_target_callee_save_regs) * 4
-        + CPU_TEMP_BUF_NLONGS * sizeof(long)
-        ;
-    frame_size = (frame_size + 15) & ~15;
-
-    tcg_set_frame(s, TCG_REG_CALL_STACK, frame_size
-                  - CPU_TEMP_BUF_NLONGS * sizeof(long),
-                  CPU_TEMP_BUF_NLONGS * sizeof(long));
+    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
+}
 
-#ifdef _CALL_AIX
-    {
-        uintptr_t addr;
-
-        /* First emit adhoc function descriptor */
-        addr = (uintptr_t)s->code_ptr + 12;
-        tcg_out32(s, addr);  /* entry point */
-        tcg_out32(s, 0);     /* toc */
-        tcg_out32(s, 0);     /* environment pointer */
+static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
+{
+    ptrdiff_t disp = tcg_pcrel_diff(s, target);
+    if (in_range_b(disp)) {
+        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
+        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
+        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
     }
-#endif
+}
 
-    tcg_out32 (s, MFSPR | RT (0) | LR);
-    tcg_out32 (s, STWU | RS (1) | RA (1) | (-frame_size & 0xffff));
-    for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
-        tcg_out32 (s, (STW
-                       | RS (tcg_target_callee_save_regs[i])
-                       | RA (1)
-                       | (i * 4 + LINKAGE_AREA_SIZE + TCG_STATIC_CALL_ARGS_SIZE)
-                       )
-            );
-    tcg_out32 (s, STW | RS (0) | RA (1) | (frame_size + LR_OFFSET));
+static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
+                             TCGReg base, tcg_target_long offset)
+{
+    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
+    bool is_store = false;
+    TCGReg rs = TCG_REG_TMP1;
+
+    switch (opi) {
+    case LD: case LWA:
+        align = 3;
+        /* FALLTHRU */
+    default:
+        if (rt != TCG_REG_R0) {
+            rs = rt;
+            break;
+        }
+        break;
+    case STD:
+        align = 3;
+        /* FALLTHRU */
+    case STB: case STH: case STW:
+        is_store = true;
+        break;
+    }
 
-#ifdef CONFIG_USE_GUEST_BASE
-    if (GUEST_BASE) {
-        tcg_out_movi (s, TCG_TYPE_I32, TCG_GUEST_BASE_REG, GUEST_BASE);
-        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
+    /* For unaligned, or very large offsets, use the indexed form.  */
+    if (offset & align || offset != (int32_t)offset) {
+        tcg_debug_assert(rs != base && (!is_store || rs != rt));
+        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
+        tcg_out32(s, opx | TAB(rt, base, rs));
+        return;
     }
-#endif
 
-    tcg_out_mov (s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
-    tcg_out32 (s, MTSPR | RS (tcg_target_call_iarg_regs[1]) | CTR);
-    tcg_out32 (s, BCCTR | BO_ALWAYS);
-    tb_ret_addr = s->code_ptr;
+    l0 = (int16_t)offset;
+    offset = (offset - l0) >> 16;
+    l1 = (int16_t)offset;
 
-    for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
-        tcg_out32 (s, (LWZ
-                       | RT (tcg_target_callee_save_regs[i])
-                       | RA (1)
-                       | (i * 4 + LINKAGE_AREA_SIZE + TCG_STATIC_CALL_ARGS_SIZE)
-                       )
-            );
+    if (l1 < 0 && orig >= 0) {
+        extra = 0x4000;
+        l1 = (int16_t)(offset - 0x4000);
+    }
-    tcg_out32 (s, LWZ | RT (0) | RA (1) | (frame_size + LR_OFFSET));
-    tcg_out32 (s, MTSPR | RS (0) | LR);
-    tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
-    tcg_out32 (s, BCLR | BO_ALWAYS);
+    if (l1) {
+        tcg_out32(s, ADDIS | TAI(rs, base, l1));
+        base = rs;
+    }
 
-#ifdef CONFIG_SOFTMMU
-    for (i = 0; i < 16; ++i) {
-        if (qemu_ld_helpers[i]) {
-            ld_trampolines[i] = s->code_ptr;
-            emit_ldst_trampoline(s, qemu_ld_helpers[i]);
-        }
-        if (qemu_st_helpers[i]) {
-            st_trampolines[i] = s->code_ptr;
-            emit_ldst_trampoline(s, qemu_st_helpers[i]);
-        }
+    if (extra) {
+        tcg_out32(s, ADDIS | TAI(rs, base, extra));
+        base = rs;
+    }
+    if (opi != ADDI || base != rt || l0 != 0) {
+        tcg_out32(s, opi | TAI(rt, base, l0));
     }
-#endif
 }
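Reviewer note (not part of the patch): the l0/l1/extra split can be verified independently. Each ADDIS contributes imm << 16 and the final opcode adds l0, so the pieces must sum back to the original offset; the extra 0x4000 step handles the case where l1 would sign-flip:

    #include <stdint.h>
    #include <assert.h>

    /* Mirror of the decomposition used by tcg_out_mem_long above. */
    static void split_offset(int64_t offset, int16_t *l0, int16_t *l1,
                             int32_t *extra)
    {
        int64_t orig = offset;

        *l0 = (int16_t)offset;
        offset = (offset - *l0) >> 16;
        *l1 = (int16_t)offset;
        *extra = 0;
        if (*l1 < 0 && orig >= 0) {
            *extra = 0x4000;
            *l1 = (int16_t)(offset - 0x4000);
        }
    }

    int main(void)
    {
        int32_t samples[] = { 0x12345678, -0x1234, 0x7fffcff0, 0x7fff8000 };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
            int16_t l0, l1;
            int32_t extra;
            split_offset(samples[i], &l0, &l1, &extra);
            assert(((int64_t)l1 << 16) + ((int64_t)extra << 16) + l0
                   == samples[i]);
        }
        return 0;
    }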
 
-static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
-                       intptr_t arg2)
+static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
+                              TCGReg arg1, intptr_t arg2)
 {
-    tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
-}
+    int opi, opx;
 
-static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
-                       intptr_t arg2)
-{
-    tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
+    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
+    if (type == TCG_TYPE_I32) {
+        opi = LWZ, opx = LWZX;
+    } else {
+        opi = LD, opx = LDX;
+    }
+    tcg_out_mem_long(s, opi, opx, ret, arg1, arg2);
 }
 
-static void ppc_addi (TCGContext *s, int rt, int ra, tcg_target_long si)
+static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
+                              TCGReg arg1, intptr_t arg2)
 {
-    if (!si && rt == ra)
-        return;
+    int opi, opx;
 
-    if (si == (int16_t) si)
-        tcg_out32 (s, ADDI | RT (rt) | RA (ra) | (si & 0xffff));
-    else {
-        uint16_t h = ((si >> 16) & 0xffff) + ((uint16_t) si >> 15);
-        tcg_out32 (s, ADDIS | RT (rt) | RA (ra) | h);
-        tcg_out32 (s, ADDI | RT (rt) | RA (rt) | (si & 0xffff));
+    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
+    if (type == TCG_TYPE_I32) {
+        opi = STW, opx = STWX;
+    } else {
+        opi = STD, opx = STDX;
     }
+    tcg_out_mem_long(s, opi, opx, arg, arg1, arg2);
 }
 
-static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
-                         int const_arg2, int cr)
+static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
+                        int const_arg2, int cr, TCGType type)
 {
     int imm;
     uint32_t op;
 
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
+
+    /* Simplify the comparisons below wrt CMPI.  */
+    if (type == TCG_TYPE_I32) {
+        arg2 = (int32_t)arg2;
+    }
+
     switch (cond) {
     case TCG_COND_EQ:
     case TCG_COND_NE:
@@ -994,8 +881,7 @@ static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                 op = CMPI;
                 imm = 1;
                 break;
-            }
-            else if ((uint16_t) arg2 == arg2) {
+            } else if ((uint16_t) arg2 == arg2) {
                 op = CMPLI;
                 imm = 1;
                 break;
@@ -1036,148 +922,144 @@ static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
         break;
 
     default:
-        tcg_abort ();
+        tcg_abort();
     }
-    op |= BF (cr);
+    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
 
-    if (imm)
-        tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
-    else {
+    if (imm) {
+        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
+    } else {
         if (const_arg2) {
-            tcg_out_movi (s, TCG_TYPE_I32, 0, arg2);
-            tcg_out32 (s, op | RA (arg1) | RB (0));
+            tcg_out_movi(s, type, TCG_REG_R0, arg2);
+            arg2 = TCG_REG_R0;
         }
-        else
-            tcg_out32 (s, op | RA (arg1) | RB (arg2));
+        tcg_out32(s, op | RA(arg1) | RB(arg2));
     }
-
 }
 
-static void tcg_out_bc(TCGContext *s, int bc, int label_index)
+static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
+                                TCGReg dst, TCGReg src)
 {
-    TCGLabel *l = &s->labels[label_index];
-
-    if (l->has_value) {
-        tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr));
+    if (type == TCG_TYPE_I32) {
+        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
+        tcg_out_shri32(s, dst, dst, 5);
     } else {
-        /* Thanks to Andrzej Zaborowski */
-        tcg_insn_unit retrans = *s->code_ptr & 0xfffc;
-        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0);
-        tcg_out32(s, bc | retrans);
+        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
+        tcg_out_shri64(s, dst, dst, 6);
     }
 }
 
-static void tcg_out_cr7eq_from_cond (TCGContext *s, const TCGArg *args,
-                                     const int *const_args)
+static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
 {
-    TCGCond cond = args[4];
-    int op;
-    struct { int bit1; int bit2; int cond2; } bits[] = {
-        [TCG_COND_LT ] = { CR_LT, CR_LT, TCG_COND_LT  },
-        [TCG_COND_LE ] = { CR_LT, CR_GT, TCG_COND_LT  },
-        [TCG_COND_GT ] = { CR_GT, CR_GT, TCG_COND_GT  },
-        [TCG_COND_GE ] = { CR_GT, CR_LT, TCG_COND_GT  },
-        [TCG_COND_LTU] = { CR_LT, CR_LT, TCG_COND_LTU },
-        [TCG_COND_LEU] = { CR_LT, CR_GT, TCG_COND_LTU },
-        [TCG_COND_GTU] = { CR_GT, CR_GT, TCG_COND_GTU },
-        [TCG_COND_GEU] = { CR_GT, CR_LT, TCG_COND_GTU },
-    }, *b = &bits[cond];
+    /* X != 0 implies X + -1 generates a carry.  Extra addition
+       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
+    if (dst != src) {
+        tcg_out32(s, ADDIC | TAI(dst, src, -1));
+        tcg_out32(s, SUBFE | TAB(dst, dst, src));
+    } else {
+        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
+        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
+    }
+}
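Reviewer note (not part of the patch): the setcond_ne0 carry trick checks out in plain integer arithmetic. addic sets CA exactly when x != 0 (adding all-ones carries out), and subfe computes ~t + x + CA, which collapses to CA:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t setcond_ne0(uint32_t x)
    {
        uint32_t t = x + 0xffffffffu;   /* addic t, x, -1 */
        uint32_t ca = (t < x);          /* carry out of the addition */
        return ~t + x + ca;             /* subfe d, t, x */
    }

    int main(void)
    {
        assert(setcond_ne0(0) == 0);
        assert(setcond_ne0(1) == 1);
        assert(setcond_ne0(0xdeadbeefu) == 1);
        return 0;
    }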
 
-    switch (cond) {
-    case TCG_COND_EQ:
-    case TCG_COND_NE:
-        op = (cond == TCG_COND_EQ) ? CRAND : CRNAND;
-        tcg_out_cmp (s, cond, args[0], args[2], const_args[2], 6);
-        tcg_out_cmp (s, cond, args[1], args[3], const_args[3], 7);
-        tcg_out32 (s, op | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
-        break;
-    case TCG_COND_LT:
-    case TCG_COND_LE:
-    case TCG_COND_GT:
-    case TCG_COND_GE:
-    case TCG_COND_LTU:
-    case TCG_COND_LEU:
-    case TCG_COND_GTU:
-    case TCG_COND_GEU:
-        op = (b->bit1 != b->bit2) ? CRANDC : CRAND;
-        tcg_out_cmp (s, b->cond2, args[1], args[3], const_args[3], 5);
-        tcg_out_cmp (s, tcg_unsigned_cond (cond), args[0], args[2],
-                     const_args[2], 7);
-        tcg_out32 (s, op | BT (7, CR_EQ) | BA (5, CR_EQ) | BB (7, b->bit2));
-        tcg_out32 (s, CROR | BT (7, CR_EQ) | BA (5, b->bit1) | BB (7, CR_EQ));
-        break;
-    default:
-        tcg_abort();
+static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
+                                  bool const_arg2)
+{
+    if (const_arg2) {
+        if ((uint32_t)arg2 == arg2) {
+            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
+        } else {
+            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
+            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
+        }
+    } else {
+        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
     }
+    return TCG_REG_R0;
 }
 
-static void tcg_out_setcond (TCGContext *s, TCGCond cond, TCGArg arg0,
-                             TCGArg arg1, TCGArg arg2, int const_arg2)
+static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
+                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
+                            int const_arg2)
 {
-    int crop, sh, arg;
+    int crop, sh;
 
-    switch (cond) {
-    case TCG_COND_EQ:
-        if (const_arg2) {
-            if (!arg2) {
-                arg = arg1;
-            }
-            else {
-                arg = 0;
-                if ((uint16_t) arg2 == arg2) {
-                    tcg_out32 (s, XORI | RS (arg1) | RA (0) | arg2);
-                }
-                else {
-                    tcg_out_movi (s, TCG_TYPE_I32, 0, arg2);
-                    tcg_out32 (s, XOR | SAB (arg1, 0, 0));
-                }
-            }
-        }
-        else {
-            arg = 0;
-            tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
-        }
-        tcg_out32 (s, CNTLZW | RS (arg) | RA (0));
-        tcg_out32 (s, (RLWINM
-                       | RA (arg0)
-                       | RS (0)
-                       | SH (27)
-                       | MB (5)
-                       | ME (31)
-                       )
-            );
-        break;
+    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
 
-    case TCG_COND_NE:
-        if (const_arg2) {
-            if (!arg2) {
-                arg = arg1;
+    /* Ignore high bits of a potential constant arg2.  */
+    if (type == TCG_TYPE_I32) {
+        arg2 = (uint32_t)arg2;
+    }
+
+    /* Handle common and trivial cases before handling anything else.  */
+    if (arg2 == 0) {
+        switch (cond) {
+        case TCG_COND_EQ:
+            tcg_out_setcond_eq0(s, type, arg0, arg1);
+            return;
+        case TCG_COND_NE:
+            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+                tcg_out_ext32u(s, TCG_REG_R0, arg1);
+                arg1 = TCG_REG_R0;
             }
-            else {
-                arg = 0;
-                if ((uint16_t) arg2 == arg2) {
-                    tcg_out32 (s, XORI | RS (arg1) | RA (0) | arg2);
-                }
-                else {
-                    tcg_out_movi (s, TCG_TYPE_I32, 0, arg2);
-                    tcg_out32 (s, XOR | SAB (arg1, 0, 0));
-                }
+            tcg_out_setcond_ne0(s, arg0, arg1);
+            return;
+        case TCG_COND_GE:
+            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
+            arg1 = arg0;
+            /* FALLTHRU */
+        case TCG_COND_LT:
+            /* Extract the sign bit.  */
+            if (type == TCG_TYPE_I32) {
+                tcg_out_shri32(s, arg0, arg1, 31);
+            } else {
+                tcg_out_shri64(s, arg0, arg1, 63);
             }
+            return;
+        default:
+            break;
         }
-        else {
-            arg = 0;
-            tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
-        }
+    }
+
+    /* If we have ISEL, we can implement everything with 3 or 4 insns.
+       All other cases below are also at least 3 insns, so speed up the
+       code generator by not considering them and always using ISEL.  */
+    if (HAVE_ISEL) {
+        int isel, tab;
+
+        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
 
-        if (arg == arg1 && arg1 == arg0) {
-            tcg_out32 (s, ADDIC | RT (0) | RA (arg) | 0xffff);
-            tcg_out32 (s, SUBFE | TAB (arg0, 0, arg));
+        isel = tcg_to_isel[cond];
+
+        tcg_out_movi(s, type, arg0, 1);
+        if (isel & 1) {
+            /* arg0 = (bc ? 0 : 1) */
+            tab = TAB(arg0, 0, arg0);
+            isel &= ~1;
+        } else {
+            /* arg0 = (bc ? 1 : 0) */
+            tcg_out_movi(s, type, TCG_REG_R0, 0);
+            tab = TAB(arg0, arg0, TCG_REG_R0);
         }
-        else {
-            tcg_out32 (s, ADDIC | RT (arg0) | RA (arg) | 0xffff);
-            tcg_out32 (s, SUBFE | TAB (arg0, arg0, arg));
+        tcg_out32(s, isel | tab);
+        return;
+    }
+
+    switch (cond) {
+    case TCG_COND_EQ:
+        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
+        tcg_out_setcond_eq0(s, type, arg0, arg1);
+        return;
+
+    case TCG_COND_NE:
+        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
+        /* Discard the high bits only once, rather than both inputs.  */
+        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+            tcg_out_ext32u(s, TCG_REG_R0, arg1);
+            arg1 = TCG_REG_R0;
         }
-        break;
+        tcg_out_setcond_ne0(s, arg0, arg1);
+        return;
 
     case TCG_COND_GT:
     case TCG_COND_GTU:
@@ -1194,176 +1076,788 @@ static void tcg_out_setcond (TCGContext *s, TCGCond cond, TCGArg arg0,
     case TCG_COND_GE:
     case TCG_COND_GEU:
         sh = 31;
-        crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_LT) | BB (7, CR_LT);
+        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
         goto crtest;
 
     case TCG_COND_LE:
     case TCG_COND_LEU:
         sh = 31;
-        crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_GT) | BB (7, CR_GT);
+        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
     crtest:
-        tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7);
-        if (crop) tcg_out32 (s, crop);
-        tcg_out32 (s, MFCR | RT (0));
-        tcg_out32 (s, (RLWINM
-                       | RA (arg0)
-                       | RS (0)
-                       | SH (sh)
-                       | MB (31)
-                       | ME (31)
-                       )
-            );
+        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
+        if (crop) {
+            tcg_out32(s, crop);
+        }
+        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
+        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
         break;
 
     default:
-        tcg_abort ();
+        tcg_abort();
     }
 }
 
-static void tcg_out_setcond2 (TCGContext *s, const TCGArg *args,
-                              const int *const_args)
+static void tcg_out_bc(TCGContext *s, int bc, int label_index)
 {
-    tcg_out_cr7eq_from_cond (s, args + 1, const_args + 1);
-    tcg_out32 (s, MFCR | RT (0));
-    tcg_out32 (s, (RLWINM
-                   | RA (args[0])
-                   | RS (0)
-                   | SH (31)
-                   | MB (31)
-                   | ME (31)
-                   )
-        );
+    TCGLabel *l = &s->labels[label_index];
+
+    if (l->has_value) {
+        tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr));
+    } else {
+        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0);
+        tcg_out_bc_noaddr(s, bc);
+    }
 }
 
-static void tcg_out_movcond (TCGContext *s, TCGCond cond,
-                             TCGArg dest,
-                             TCGArg c1, TCGArg c2,
-                             TCGArg v1, TCGArg v2,
-                             int const_c2)
+static void tcg_out_brcond(TCGContext *s, TCGCond cond,
+                           TCGArg arg1, TCGArg arg2, int const_arg2,
+                           int label_index, TCGType type)
 {
-    tcg_out_cmp (s, cond, c1, c2, const_c2, 7);
+    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
+    tcg_out_bc(s, tcg_to_bc[cond], label_index);
+}
+
+static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
+                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
+                            TCGArg v2, bool const_c2)
+{
+    /* If for some reason both inputs are zero, don't produce bad code.  */
+    if (v1 == 0 && v2 == 0) {
+        tcg_out_movi(s, type, dest, 0);
+        return;
+    }
+
+    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
+
+    if (HAVE_ISEL) {
+        int isel = tcg_to_isel[cond];
 
-    if (1) {
-        /* At least here on 7747A bit twiddling hacks are outperformed
-           by jumpy code (the testing was not scientific) */
+        /* Swap the V operands if the operation indicates inversion.  */
+        if (isel & 1) {
+            int t = v1;
+            v1 = v2;
+            v2 = t;
+            isel &= ~1;
+        }
+        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
+        if (v2 == 0) {
+            tcg_out_movi(s, type, TCG_REG_R0, 0);
+        }
+        tcg_out32(s, isel | TAB(dest, v1, v2));
+    } else {
         if (dest == v2) {
-            cond = tcg_invert_cond (cond);
+            cond = tcg_invert_cond(cond);
             v2 = v1;
-        }
-        else {
-            if (dest != v1) {
-                tcg_out_mov (s, TCG_TYPE_I32, dest, v1);
+        } else if (dest != v1) {
+            if (v1 == 0) {
+                tcg_out_movi(s, type, dest, 0);
+            } else {
+                tcg_out_mov(s, type, dest, v1);
             }
         }
         /* Branch forward over one insn */
-        tcg_out32 (s, tcg_to_bc[cond] | 8);
-        tcg_out_mov (s, TCG_TYPE_I32, dest, v2);
+        tcg_out32(s, tcg_to_bc[cond] | 8);
+        if (v2 == 0) {
+            tcg_out_movi(s, type, dest, 0);
+        } else {
+            tcg_out_mov(s, type, dest, v2);
+        }
     }
-    else {
-        /* isel version, "if (1)" above should be replaced once a way
-           to figure out availability of isel on the underlying
-           hardware is found */
-        int tab, bc;
+}
 
-        switch (cond) {
-        case TCG_COND_EQ:
-            tab = TAB (dest, v1, v2);
-            bc = CR_EQ;
-            break;
-        case TCG_COND_NE:
-            tab = TAB (dest, v2, v1);
-            bc = CR_EQ;
-            break;
-        case TCG_COND_LTU:
-        case TCG_COND_LT:
-            tab = TAB (dest, v1, v2);
-            bc = CR_LT;
-            break;
-        case TCG_COND_GEU:
-        case TCG_COND_GE:
-            tab = TAB (dest, v2, v1);
-            bc = CR_LT;
-            break;
-        case TCG_COND_LEU:
-        case TCG_COND_LE:
-            tab = TAB (dest, v2, v1);
-            bc = CR_GT;
-            break;
-        case TCG_COND_GTU:
-        case TCG_COND_GT:
-            tab = TAB (dest, v1, v2);
-            bc = CR_GT;
-            break;
-        default:
-            tcg_abort ();
-        }
-        tcg_out32 (s, ISEL | tab | ((bc + 28) << 6));
+static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
+                         const int *const_args)
+{
+    static const struct { uint8_t bit1, bit2; } bits[] = {
+        [TCG_COND_LT ] = { CR_LT, CR_LT },
+        [TCG_COND_LE ] = { CR_LT, CR_GT },
+        [TCG_COND_GT ] = { CR_GT, CR_GT },
+        [TCG_COND_GE ] = { CR_GT, CR_LT },
+        [TCG_COND_LTU] = { CR_LT, CR_LT },
+        [TCG_COND_LEU] = { CR_LT, CR_GT },
+        [TCG_COND_GTU] = { CR_GT, CR_GT },
+        [TCG_COND_GEU] = { CR_GT, CR_LT },
+    };
+
+    TCGCond cond = args[4], cond2;
+    TCGArg al, ah, bl, bh;
+    int blconst, bhconst;
+    int op, bit1, bit2;
+
+    al = args[0];
+    ah = args[1];
+    bl = args[2];
+    bh = args[3];
+    blconst = const_args[2];
+    bhconst = const_args[3];
+
+    switch (cond) {
+    case TCG_COND_EQ:
+        op = CRAND;
+        goto do_equality;
+    case TCG_COND_NE:
+        op = CRNAND;
+    do_equality:
+        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
+        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
+        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
+        break;
+
+    case TCG_COND_LT:
+    case TCG_COND_LE:
+    case TCG_COND_GT:
+    case TCG_COND_GE:
+    case TCG_COND_LTU:
+    case TCG_COND_LEU:
+    case TCG_COND_GTU:
+    case TCG_COND_GEU:
+        bit1 = bits[cond].bit1;
+        bit2 = bits[cond].bit2;
+        op = (bit1 != bit2 ? CRANDC : CRAND);
+        cond2 = tcg_unsigned_cond(cond);
+
+        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
+        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
+        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
+        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
+        break;
+
+    default:
+        tcg_abort();
     }
 }
 
-static void tcg_out_brcond (TCGContext *s, TCGCond cond,
-                            TCGArg arg1, TCGArg arg2, int const_arg2,
-                            int label_index)
+static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
+                             const int *const_args)
 {
-    tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7);
-    tcg_out_bc (s, tcg_to_bc[cond], label_index);
+    tcg_out_cmp2(s, args + 1, const_args + 1);
+    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
+    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
 }
 
-/* XXX: we implement it at the target level to avoid having to
-   handle cross basic blocks temporaries */
 static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
                              const int *const_args)
 {
-    tcg_out_cr7eq_from_cond (s, args, const_args);
-    tcg_out_bc (s, (BC | BI (7, CR_EQ) | BO_COND_TRUE), args[5]);
+    tcg_out_cmp2(s, args, const_args);
+    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, args[5]);
+}
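Reviewer note (not part of the patch): the CR gymnastics in tcg_out_cmp2 implement the usual two-word comparison: the high halves decide unless they are equal, in which case the low halves compare unsigned. The same logic in plain C:

    #include <stdint.h>
    #include <assert.h>

    /* 64-bit signed less-than from 32-bit halves, as brcond2/setcond2 do. */
    static int lt64_from_halves(int32_t ah, uint32_t al, int32_t bh, uint32_t bl)
    {
        return ah < bh || (ah == bh && al < bl);
    }

    int main(void)
    {
        assert(lt64_from_halves(-1, 0xffffffffu, 0, 0));   /* -1 < 0 */
        assert(!lt64_from_halves(0, 1, 0, 0));             /* 1 > 0 */
        assert(lt64_from_halves(5, 2, 5, 3));   /* equal highs: unsigned lows */
        return 0;
    }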
*/ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target); + + diff = tcg_pcrel_diff(s, target); + if (in_range_b(diff)) { + tcg_out_b(s, LK, target); + } else { + tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR); + tcg_out32(s, BCCTR | BO_ALWAYS | LK); + } +#else + tcg_out_b(s, LK, target); +#endif +} + +static const uint32_t qemu_ldx_opc[16] = { + [MO_UB] = LBZX, + [MO_UW] = LHZX, + [MO_UL] = LWZX, + [MO_Q] = LDX, + [MO_SW] = LHAX, + [MO_SL] = LWAX, + [MO_BSWAP | MO_UB] = LBZX, + [MO_BSWAP | MO_UW] = LHBRX, + [MO_BSWAP | MO_UL] = LWBRX, + [MO_BSWAP | MO_Q] = LDBRX, +}; + +static const uint32_t qemu_stx_opc[16] = { + [MO_UB] = STBX, + [MO_UW] = STHX, + [MO_UL] = STWX, + [MO_Q] = STDX, + [MO_BSWAP | MO_UB] = STBX, + [MO_BSWAP | MO_UW] = STHBRX, + [MO_BSWAP | MO_UL] = STWBRX, + [MO_BSWAP | MO_Q] = STDBRX, +}; + +static const uint32_t qemu_exts_opc[4] = { + EXTSB, EXTSH, EXTSW, 0 +}; + +#if defined (CONFIG_SOFTMMU) +/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, + * int mmu_idx, uintptr_t ra) + */ +static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +}; + +/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr, + * uintxx_t val, int mmu_idx, uintptr_t ra) + */ +static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; + +/* Perform the TLB load and compare. Places the result of the comparison + in CR7, loads the addend of the TLB into R3, and returns the register + containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ + +static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits, + TCGReg addrlo, TCGReg addrhi, + int mem_index, bool is_read) +{ + int cmp_off + = (is_read + ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write)); + int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend); + TCGReg base = TCG_AREG0; + + /* Extract the page index, shifted into place for tlb index. */ + if (TCG_TARGET_REG_BITS == 64) { + if (TARGET_LONG_BITS == 32) { + /* Zero-extend the address into a place helpful for further use. */ + tcg_out_ext32u(s, TCG_REG_R4, addrlo); + addrlo = TCG_REG_R4; + } else { + tcg_out_rld(s, RLDICL, TCG_REG_R3, addrlo, + 64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS); + } + } + + /* Compensate for very large offsets. */ + if (add_off >= 0x8000) { + /* Most target env are smaller than 32k; none are larger than 64k. + Simplify the logic here merely to offset by 0x7ff0, giving us a + range just shy of 64k. Check this assumption. */ + QEMU_BUILD_BUG_ON(offsetof(CPUArchState, + tlb_table[NB_MMU_MODES - 1][1]) + > 0x7ff0 + 0x7fff); + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, base, 0x7ff0)); + base = TCG_REG_TMP1; + cmp_off -= 0x7ff0; + add_off -= 0x7ff0; + } + + /* Extraction and shifting, part 2. 
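The "extract the page index" step above shifts the guest page number into place as a byte offset into the direct-mapped TLB table. The arithmetic it encodes, as a C sketch with illustrative values for the build-time constants:

#include <stdint.h>

#define TARGET_PAGE_BITS 12      /* illustrative: 4 KiB guest pages */
#define CPU_TLB_BITS      8      /* illustrative: 256-entry TLB */

static unsigned tlb_index(uint64_t addr)
{
    /* drop the in-page offset, keep CPU_TLB_BITS bits of the page number */
    return (addr >> TARGET_PAGE_BITS) & ((1u << CPU_TLB_BITS) - 1);
}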
*/ + if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) { + tcg_out_rlw(s, RLWINM, TCG_REG_R3, addrlo, + 32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), + 32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS), + 31 - CPU_TLB_ENTRY_BITS); + } else { + tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS); + } + + tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base)); + + /* Load the tlb comparator. */ + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off); + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4); + } else { + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off); + } + + /* Load the TLB addend for use on the fast path. Do this asap + to minimize any load use delay. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off); + + /* Clear the non-page, non-alignment bits from the address. */ + if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) { + tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0, + (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS); + } else if (!s_bits) { + tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo, + 0, 63 - TARGET_PAGE_BITS); + } else { + tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo, + 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits); + tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); + } + + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1, + 0, 7, TCG_TYPE_I32); + tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32); + tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); + } else { + tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1, + 0, 7, TCG_TYPE_TL); + } + + return addrlo; +} + +/* Record the context of a call to the out of line helper code for the slow + path for a load or store, so that we can later generate the correct + helper code. */ +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, + TCGReg datalo_reg, TCGReg datahi_reg, + TCGReg addrlo_reg, TCGReg addrhi_reg, + int mem_index, tcg_insn_unit *raddr, + tcg_insn_unit *lptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->opc = opc; + label->datalo_reg = datalo_reg; + label->datahi_reg = datahi_reg; + label->addrlo_reg = addrlo_reg; + label->addrhi_reg = addrhi_reg; + label->mem_index = mem_index; + label->raddr = raddr; + label->label_ptr[0] = lptr; +} + +static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGMemOp opc = lb->opc; + TCGReg hi, lo, arg = TCG_REG_R3; + + reloc_pc14(lb->label_ptr[0], s->code_ptr); + + tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0); + + lo = lb->addrlo_reg; + hi = lb->addrhi_reg; + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { +#ifdef TCG_TARGET_CALL_ALIGN_ARGS + arg |= 1; +#endif + tcg_out_mov(s, TCG_TYPE_I32, arg++, hi); + tcg_out_mov(s, TCG_TYPE_I32, arg++, lo); + } else { + /* If the address needed to be zero-extended, we'll have already + placed it in R4. The only remaining case is 64-bit guest. 
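The "clear the non-page, non-alignment bits" step builds the value compared against the TLB's addr_read/addr_write field. Keeping the low alignment bits is deliberate: a misaligned access leaves them set, so the compare against the page-aligned comparator fails and the slow path is taken. In C, reusing the illustrative constants above:

static uint64_t tlb_compare_value(uint64_t addr, unsigned s_bits)
{
    uint64_t page_mask  = ~(((uint64_t)1 << TARGET_PAGE_BITS) - 1);
    uint64_t align_bits = ((uint64_t)1 << s_bits) - 1;
    return addr & (page_mask | align_bits);  /* set low bits => mismatch */
}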
*/ + tcg_out_mov(s, TCG_TYPE_TL, arg++, lo); + } + + tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index); + tcg_out32(s, MFSPR | RT(arg) | LR); + + tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); + + lo = lb->datalo_reg; + hi = lb->datahi_reg; + if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { + tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4); + tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3); + } else if (opc & MO_SIGN) { + uint32_t insn = qemu_exts_opc[opc & MO_SIZE]; + tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3)); + } else { + tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3); + } + + tcg_out_b(s, 0, lb->raddr); +} + +static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGMemOp opc = lb->opc; + TCGMemOp s_bits = opc & MO_SIZE; + TCGReg hi, lo, arg = TCG_REG_R3; + + reloc_pc14(lb->label_ptr[0], s->code_ptr); + + tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0); + + lo = lb->addrlo_reg; + hi = lb->addrhi_reg; + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { +#ifdef TCG_TARGET_CALL_ALIGN_ARGS + arg |= 1; +#endif + tcg_out_mov(s, TCG_TYPE_I32, arg++, hi); + tcg_out_mov(s, TCG_TYPE_I32, arg++, lo); + } else { + /* If the address needed to be zero-extended, we'll have already + placed it in R4. The only remaining case is 64-bit guest. */ + tcg_out_mov(s, TCG_TYPE_TL, arg++, lo); + } + + lo = lb->datalo_reg; + hi = lb->datahi_reg; + if (TCG_TARGET_REG_BITS == 32) { + switch (s_bits) { + case MO_64: +#ifdef TCG_TARGET_CALL_ALIGN_ARGS + arg |= 1; +#endif + tcg_out_mov(s, TCG_TYPE_I32, arg++, hi); + /* FALLTHRU */ + case MO_32: + tcg_out_mov(s, TCG_TYPE_I32, arg++, lo); + break; + default: + tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31); + break; + } + } else { + if (s_bits == MO_64) { + tcg_out_mov(s, TCG_TYPE_I64, arg++, lo); + } else { + tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits)); + } + } + + tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index); + tcg_out32(s, MFSPR | RT(arg) | LR); + + tcg_out_call(s, qemu_st_helpers[opc]); + + tcg_out_b(s, 0, lb->raddr); +} +#endif /* SOFTMMU */ + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) +{ + TCGReg datalo, datahi, addrlo, rbase; + TCGReg addrhi __attribute__((unused)); + TCGMemOp opc, s_bits; +#ifdef CONFIG_SOFTMMU + int mem_index; + tcg_insn_unit *label_ptr; +#endif + + datalo = *args++; + datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); + opc = *args++; + s_bits = opc & MO_SIZE; + +#ifdef CONFIG_SOFTMMU + mem_index = *args; + addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, true); + + /* Load a pointer into the current opcode w/conditional branch-link. */ + label_ptr = s->code_ptr; + tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); + + rbase = TCG_REG_R3; +#else /* !CONFIG_SOFTMMU */ + rbase = GUEST_BASE ? 
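The `arg |= 1` under TCG_TARGET_CALL_ALIGN_ARGS rounds the next argument register up to an odd number so that a 64-bit value lands in an aligned r5:r6 / r7:r8 style pair; that 32-bit SysV convention is my reading of the macro, so treat this sketch as an assumption:

static int first_reg_for_i64_arg(int next_reg)
{
    return next_reg | 1;   /* e.g. r4 -> r5, so the pair becomes r5:r6 */
}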
TCG_GUEST_BASE_REG : 0; + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); + addrlo = TCG_REG_TMP1; + } +#endif + + if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) { + if (opc & MO_BSWAP) { + tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); + tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); + tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0)); + } else if (rbase != 0) { + tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); + tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo)); + tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0)); + } else if (addrlo == datahi) { + tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); + tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); + } else { + tcg_out32(s, LWZ | TAI(datahi, addrlo, 0)); + tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); + } + } else { + uint32_t insn = qemu_ldx_opc[opc]; + if (!HAVE_ISA_2_06 && insn == LDBRX) { + tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); + tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); + tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0)); + tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0); + } else if (insn) { + tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); + } else { + insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)]; + tcg_out32(s, insn | TAB(datalo, rbase, addrlo)); + insn = qemu_exts_opc[s_bits]; + tcg_out32(s, insn | RA(datalo) | RS(datalo)); + } + } + +#ifdef CONFIG_SOFTMMU + add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi, + mem_index, s->code_ptr, label_ptr); +#endif } -void ppc_tb_set_jmp_target (unsigned long jmp_addr, unsigned long addr) +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { - uint32_t *ptr; - long disp = addr - jmp_addr; - unsigned long patch_size; - - ptr = (uint32_t *)jmp_addr; - - if ((disp << 6) >> 6 != disp) { - ptr[0] = 0x3c000000 | (addr >> 16); /* lis 0,addr@ha */ - ptr[1] = 0x60000000 | (addr & 0xffff); /* la 0,addr@l(0) */ - ptr[2] = 0x7c0903a6; /* mtctr 0 */ - ptr[3] = 0x4e800420; /* brctr */ - patch_size = 16; + TCGReg datalo, datahi, addrlo, rbase; + TCGReg addrhi __attribute__((unused)); + TCGMemOp opc, s_bits; +#ifdef CONFIG_SOFTMMU + int mem_index; + tcg_insn_unit *label_ptr; +#endif + + datalo = *args++; + datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); + opc = *args++; + s_bits = opc & MO_SIZE; + +#ifdef CONFIG_SOFTMMU + mem_index = *args; + addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, false); + + /* Load a pointer into the current opcode w/conditional branch-link. */ + label_ptr = s->code_ptr; + tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); + + rbase = TCG_REG_R3; +#else /* !CONFIG_SOFTMMU */ + rbase = GUEST_BASE ? 
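Pre-ISA-2.06 hosts lack ldbrx, so the load path above synthesizes the byte-reversed 64-bit load from two lwbrx plus an rldimi merge. A C model of the value produced, assuming the 64-bit datum is read as two 32-bit words:

#include <stdint.h>

static uint32_t bswap32_model(uint32_t x)
{
    return (x >> 24) | ((x >> 8) & 0x0000ff00)
         | ((x << 8) & 0x00ff0000) | (x << 24);
}

static uint64_t ldbrx_fallback(const uint32_t *p)
{
    uint64_t lo = bswap32_model(p[0]);  /* lwbrx datalo, rbase, addrlo */
    uint64_t hi = bswap32_model(p[1]);  /* lwbrx r0, rbase, addrlo+4 */
    return lo | (hi << 32);             /* rldimi datalo, r0, 32, 0 */
}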
TCG_GUEST_BASE_REG : 0; + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); + addrlo = TCG_REG_TMP1; + } +#endif + + if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) { + if (opc & MO_BSWAP) { + tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); + tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); + tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0)); + } else if (rbase != 0) { + tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); + tcg_out32(s, STWX | SAB(datahi, rbase, addrlo)); + tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0)); + } else { + tcg_out32(s, STW | TAI(datahi, addrlo, 0)); + tcg_out32(s, STW | TAI(datalo, addrlo, 4)); + } } else { - /* patch the branch destination */ - if (disp != 16) { - *ptr = 0x48000000 | (disp & 0x03fffffc); /* b disp */ - patch_size = 4; + uint32_t insn = qemu_stx_opc[opc]; + if (!HAVE_ISA_2_06 && insn == STDBRX) { + tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4)); + tcg_out_shri64(s, TCG_REG_R0, datalo, 32); + tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1)); } else { - ptr[0] = 0x60000000; /* nop */ - ptr[1] = 0x60000000; - ptr[2] = 0x60000000; - ptr[3] = 0x60000000; - patch_size = 16; + tcg_out32(s, insn | SAB(datalo, rbase, addrlo)); } } - /* flush icache */ - flush_icache_range(jmp_addr, jmp_addr + patch_size); + +#ifdef CONFIG_SOFTMMU + add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi, + mem_index, s->code_ptr, label_ptr); +#endif +} + +/* Parameters for function call generation, used in tcg.c. */ +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_EXTEND_ARGS 1 + +#ifdef _CALL_AIX +# define LINK_AREA_SIZE (6 * SZR) +# define LR_OFFSET (1 * SZR) +# define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR) +#elif TCG_TARGET_REG_BITS == 64 +# if defined(_CALL_ELF) && _CALL_ELF == 2 +# define LINK_AREA_SIZE (4 * SZR) +# define LR_OFFSET (1 * SZR) +# endif +#else /* TCG_TARGET_REG_BITS == 32 */ +# if defined(_CALL_SYSV) +# define LINK_AREA_SIZE (2 * SZR) +# define LR_OFFSET (1 * SZR) +# elif defined(TCG_TARGET_CALL_DARWIN) +# define LINK_AREA_SIZE 24 +# define LR_OFFSET 8 +# endif +#endif +#ifndef LR_OFFSET +# error "Unhandled abi" +#endif +#ifndef TCG_TARGET_CALL_STACK_OFFSET +# define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE +#endif + +#define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) +#define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR) + +#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \ + + TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_SIZE \ + + REG_SAVE_SIZE \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & -TCG_TARGET_STACK_ALIGN) + +#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE) + +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i; + +#ifdef _CALL_AIX + void **desc = (void **)s->code_ptr; + desc[0] = desc + 2; /* entry point */ + desc[1] = 0; /* environment pointer */ + s->code_ptr = (void *)(desc + 2); /* skip over descriptor */ +#endif + + tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE, + CPU_TEMP_BUF_SIZE); + + /* Prologue */ + tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR); + tcg_out32(s, (SZR == 8 ? 
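FRAME_SIZE above is the standard round-up-to-alignment idiom: sum the fixed areas, add (align - 1), and mask with -align, which is valid because the 16-byte stack alignment is a power of two. In isolation:

static int frame_size(int stack_off, int call_args, int temp_buf,
                      int reg_save, int align)
{
    int raw = stack_off + call_args + temp_buf + reg_save;
    return (raw + align - 1) & -align;   /* round raw up to `align` */
}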
STDU : STWU) + | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE)); + + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) { + tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_R1, REG_SAVE_BOT + i * SZR); + } + tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET); + +#ifdef CONFIG_USE_GUEST_BASE + if (GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); + } +#endif + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR); + + if (USE_REG_RA) { +#ifdef _CALL_AIX + /* Make the caller load the value as the TOC into R2. */ + tb_ret_addr = s->code_ptr + 2; + desc[1] = tb_ret_addr; + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_RA, TCG_REG_R2); + tcg_out32(s, BCCTR | BO_ALWAYS); +#elif defined(_CALL_ELF) && _CALL_ELF == 2 + /* Compute from the incoming R12 value. */ + tb_ret_addr = s->code_ptr + 2; + tcg_out32(s, ADDI | TAI(TCG_REG_RA, TCG_REG_R12, + tcg_ptr_byte_diff(tb_ret_addr, s->code_buf))); + tcg_out32(s, BCCTR | BO_ALWAYS); +#else + /* Reserve max 5 insns for the constant load. */ + tb_ret_addr = s->code_ptr + 6; + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)tb_ret_addr); + tcg_out32(s, BCCTR | BO_ALWAYS); + while (s->code_ptr < tb_ret_addr) { + tcg_out32(s, NOP); + } +#endif + } else { + tcg_out32(s, BCCTR | BO_ALWAYS); + tb_ret_addr = s->code_ptr; + } + + /* Epilogue */ + assert(tb_ret_addr == s->code_ptr); + + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET); + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) { + tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_R1, REG_SAVE_BOT + i * SZR); + } + tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR); + tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE)); + tcg_out32(s, BCLR | BO_ALWAYS); } static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { + TCGArg a0, a1, a2; + int c; + switch (opc) { case INDEX_op_exit_tb: - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R3, args[0]); + if (USE_REG_RA) { + ptrdiff_t disp = tcg_pcrel_diff(s, tb_ret_addr); + + /* If we can use a direct branch, otherwise use the value in RA. + Note that the direct branch is always forward. If it's in + range now, it'll still be in range after the movi. Don't + bother about the 20 bytes where the test here fails but it + would succeed below. */ + if (!in_range_b(disp)) { + tcg_out32(s, MTSPR | RS(TCG_REG_RA) | CTR); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]); + tcg_out32(s, BCCTR | BO_ALWAYS); + break; + } + } + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]); tcg_out_b(s, 0, tb_ret_addr); break; case INDEX_op_goto_tb: if (s->tb_jmp_offset) { - /* direct jump method */ + /* Direct jump method. */ s->tb_jmp_offset[args[0]] = tcg_current_code_size(s); - s->code_ptr += 4; + s->code_ptr += 7; } else { - tcg_abort (); + /* Indirect jump method. 
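The in_range_b test used by the exit_tb case (and by tcg_out_call) asks whether a displacement fits the 26-bit signed, word-aligned field of the PPC `b` instruction, i.e. roughly +/-32 MiB. A self-contained version of the same sextract idiom QEMU's bitops provide:

#include <stdbool.h>
#include <stdint.h>

static int64_t sextract64_model(uint64_t value, int start, int length)
{
    uint64_t v = (value >> start) & ((1ULL << length) - 1);
    uint64_t sign = 1ULL << (length - 1);
    return (int64_t)((v ^ sign) - sign);   /* portable sign extension */
}

static bool in_range_b_model(int64_t disp)
{
    return disp == sextract64_model(disp, 0, 26);
}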
*/ + tcg_abort(); } s->tb_next_offset[args[0]] = tcg_current_code_size(s); break; @@ -1374,451 +1868,519 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, if (l->has_value) { tcg_out_b(s, 0, l->u.value_ptr); } else { - /* Thanks to Andrzej Zaborowski */ - tcg_insn_unit retrans = *s->code_ptr & 0x3fffffc; tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, args[0], 0); - tcg_out32(s, B | retrans); + tcg_out_b_noaddr(s, B); } } break; case INDEX_op_ld8u_i32: - tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX); + case INDEX_op_ld8u_i64: + tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); break; case INDEX_op_ld8s_i32: - tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX); - tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0])); + case INDEX_op_ld8s_i64: + tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); + tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0])); break; case INDEX_op_ld16u_i32: - tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX); + case INDEX_op_ld16u_i64: + tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]); break; case INDEX_op_ld16s_i32: - tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX); + case INDEX_op_ld16s_i64: + tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]); break; case INDEX_op_ld_i32: - tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX); + case INDEX_op_ld32u_i64: + tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]); + break; + case INDEX_op_ld32s_i64: + tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]); + break; + case INDEX_op_ld_i64: + tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]); break; case INDEX_op_st8_i32: - tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX); + case INDEX_op_st8_i64: + tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]); break; case INDEX_op_st16_i32: - tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX); + case INDEX_op_st16_i64: + tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]); break; case INDEX_op_st_i32: - tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX); + case INDEX_op_st32_i64: + tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]); + break; + case INDEX_op_st_i64: + tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]); break; case INDEX_op_add_i32: - if (const_args[2]) - ppc_addi (s, args[0], args[1], args[2]); - else - tcg_out32 (s, ADD | TAB (args[0], args[1], args[2])); + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + do_addi_32: + tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2); + } else { + tcg_out32(s, ADD | TAB(a0, a1, a2)); + } break; case INDEX_op_sub_i32: - if (const_args[2]) - ppc_addi (s, args[0], args[1], -args[2]); - else - tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1])); + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[1]) { + if (const_args[2]) { + tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2); + } else { + tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); + } + } else if (const_args[2]) { + a2 = -a2; + goto do_addi_32; + } else { + tcg_out32(s, SUBF | TAB(a0, a2, a1)); + } break; case INDEX_op_and_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { - uint32_t c; - - c = args[2]; - - if (!c) { - tcg_out_movi (s, TCG_TYPE_I32, args[0], 0); - break; - } -#ifdef __PPU__ - uint32_t t, n; - int mb, me; - - n = c ^ -(c & 1); - t = n + (n & -n); - - if ((t & (t - 1)) == 0) { - int lzc, tzc; - - if ((c & 0x80000001) == 0x80000001) { - lzc = clz32 (n); - tzc = ctz32 (n); - - mb = 32 - tzc; - me = lzc - 1; - } - else { - lzc = clz32 (c); - tzc = ctz32 
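All the ld/st cases above now go through tcg_out_mem_long, which picks between the D-form encoding (16-bit signed displacement) and the X-form (register-indexed) encoding; for ld/std the field is DS-form and must additionally be a multiple of 4. A hedged sketch of the fitness test, simplified from the real routine:

#include <stdbool.h>
#include <stdint.h>

static bool offset_fits_d_form(intptr_t off, bool is_ds_form)
{
    if (off != (int16_t)off) {
        return false;                 /* needs addis/movi + indexed form */
    }
    return !is_ds_form || (off & 3) == 0;  /* ld/std: low 2 bits clear */
}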
(c); - - mb = lzc; - me = 31 - tzc; - } - - tcg_out32 (s, (RLWINM - | RA (args[0]) - | RS (args[1]) - | SH (0) - | MB (mb) - | ME (me) - ) - ); - } - else -#endif /* !__PPU__ */ - { - if ((c & 0xffff) == c) - tcg_out32 (s, ANDI | RS (args[1]) | RA (args[0]) | c); - else if ((c & 0xffff0000) == c) - tcg_out32 (s, ANDIS | RS (args[1]) | RA (args[0]) - | ((c >> 16) & 0xffff)); - else { - tcg_out_movi (s, TCG_TYPE_I32, 0, c); - tcg_out32 (s, AND | SAB (args[1], args[0], 0)); - } - } + tcg_out_andi32(s, a0, a1, a2); + } else { + tcg_out32(s, AND | SAB(a1, a0, a2)); } - else - tcg_out32 (s, AND | SAB (args[1], args[0], args[2])); break; + case INDEX_op_and_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_andi64(s, a0, a1, a2); + } else { + tcg_out32(s, AND | SAB(a1, a0, a2)); + } + break; + case INDEX_op_or_i64: case INDEX_op_or_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { - if (args[2] & 0xffff) { - tcg_out32 (s, ORI | RS (args[1]) | RA (args[0]) - | (args[2] & 0xffff)); - if (args[2] >> 16) - tcg_out32 (s, ORIS | RS (args[0]) | RA (args[0]) - | ((args[2] >> 16) & 0xffff)); - } - else { - tcg_out32 (s, ORIS | RS (args[1]) | RA (args[0]) - | ((args[2] >> 16) & 0xffff)); - } + tcg_out_ori32(s, a0, a1, a2); + } else { + tcg_out32(s, OR | SAB(a1, a0, a2)); } - else - tcg_out32 (s, OR | SAB (args[1], args[0], args[2])); break; + case INDEX_op_xor_i64: case INDEX_op_xor_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { - if ((args[2] & 0xffff) == args[2]) - tcg_out32 (s, XORI | RS (args[1]) | RA (args[0]) - | (args[2] & 0xffff)); - else if ((args[2] & 0xffff0000) == args[2]) - tcg_out32 (s, XORIS | RS (args[1]) | RA (args[0]) - | ((args[2] >> 16) & 0xffff)); - else { - tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]); - tcg_out32 (s, XOR | SAB (args[1], args[0], 0)); - } + tcg_out_xori32(s, a0, a1, a2); + } else { + tcg_out32(s, XOR | SAB(a1, a0, a2)); } - else - tcg_out32 (s, XOR | SAB (args[1], args[0], args[2])); break; case INDEX_op_andc_i32: - tcg_out32 (s, ANDC | SAB (args[1], args[0], args[2])); + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_andi32(s, a0, a1, ~a2); + } else { + tcg_out32(s, ANDC | SAB(a1, a0, a2)); + } + break; + case INDEX_op_andc_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_andi64(s, a0, a1, ~a2); + } else { + tcg_out32(s, ANDC | SAB(a1, a0, a2)); + } break; case INDEX_op_orc_i32: - tcg_out32 (s, ORC | SAB (args[1], args[0], args[2])); + if (const_args[2]) { + tcg_out_ori32(s, args[0], args[1], ~args[2]); + break; + } + /* FALLTHRU */ + case INDEX_op_orc_i64: + tcg_out32(s, ORC | SAB(args[1], args[0], args[2])); break; case INDEX_op_eqv_i32: - tcg_out32 (s, EQV | SAB (args[1], args[0], args[2])); + if (const_args[2]) { + tcg_out_xori32(s, args[0], args[1], ~args[2]); + break; + } + /* FALLTHRU */ + case INDEX_op_eqv_i64: + tcg_out32(s, EQV | SAB(args[1], args[0], args[2])); break; case INDEX_op_nand_i32: - tcg_out32 (s, NAND | SAB (args[1], args[0], args[2])); + case INDEX_op_nand_i64: + tcg_out32(s, NAND | SAB(args[1], args[0], args[2])); break; case INDEX_op_nor_i32: - tcg_out32 (s, NOR | SAB (args[1], args[0], args[2])); + case INDEX_op_nor_i64: + tcg_out32(s, NOR | SAB(args[1], args[0], args[2])); break; case INDEX_op_mul_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { - if (args[2] == (int16_t) args[2]) - tcg_out32 (s, MULLI | RT (args[0]) | RA (args[1]) - | (args[2] & 0xffff)); - else { - tcg_out_movi (s, 
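The removed or_i32 code (and tcg_out_ori32, which replaces it) exploits the fact that any 32-bit constant can be OR'ed in with at most two immediates: ori for the low halfword, oris for the high one. A C model:

#include <stdint.h>

static uint32_t or_const(uint32_t x, uint32_t c)
{
    if (c & 0xffff) {
        x |= c & 0xffff;          /* ori  rd, rs, c & 0xffff */
    }
    if (c >> 16) {
        x |= c & 0xffff0000;      /* oris rd, rd, c >> 16 */
    }
    return x;
}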
TCG_TYPE_I32, 0, args[2]); - tcg_out32 (s, MULLW | TAB (args[0], args[1], 0)); - } + tcg_out32(s, MULLI | TAI(a0, a1, a2)); + } else { + tcg_out32(s, MULLW | TAB(a0, a1, a2)); } - else - tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2])); break; case INDEX_op_div_i32: - tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2])); + tcg_out32(s, DIVW | TAB(args[0], args[1], args[2])); break; case INDEX_op_divu_i32: - tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2])); - break; - - case INDEX_op_mulu2_i32: - if (args[0] == args[2] || args[0] == args[3]) { - tcg_out32 (s, MULLW | TAB (0, args[2], args[3])); - tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3])); - tcg_out_mov (s, TCG_TYPE_I32, args[0], 0); - } - else { - tcg_out32 (s, MULLW | TAB (args[0], args[2], args[3])); - tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3])); - } + tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2])); break; case INDEX_op_shl_i32: if (const_args[2]) { - tcg_out32 (s, (RLWINM - | RA (args[0]) - | RS (args[1]) - | SH (args[2]) - | MB (0) - | ME (31 - args[2]) - ) - ); - } - else - tcg_out32 (s, SLW | SAB (args[1], args[0], args[2])); + tcg_out_shli32(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SLW | SAB(args[1], args[0], args[2])); + } break; case INDEX_op_shr_i32: if (const_args[2]) { - tcg_out32 (s, (RLWINM - | RA (args[0]) - | RS (args[1]) - | SH (32 - args[2]) - | MB (args[2]) - | ME (31) - ) - ); - } - else - tcg_out32 (s, SRW | SAB (args[1], args[0], args[2])); + tcg_out_shri32(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SRW | SAB(args[1], args[0], args[2])); + } break; case INDEX_op_sar_i32: - if (const_args[2]) - tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2])); - else - tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2])); + if (const_args[2]) { + tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2])); + } else { + tcg_out32(s, SRAW | SAB(args[1], args[0], args[2])); + } break; case INDEX_op_rotl_i32: - { - int op = 0 - | RA (args[0]) - | RS (args[1]) - | MB (0) - | ME (31) - | (const_args[2] ? 
RLWINM | SH (args[2]) - : RLWNM | RB (args[2])) - ; - tcg_out32 (s, op); + if (const_args[2]) { + tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31); + } else { + tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2]) + | MB(0) | ME(31)); } break; case INDEX_op_rotr_i32: if (const_args[2]) { - if (!args[2]) { - tcg_out_mov (s, TCG_TYPE_I32, args[0], args[1]); - } - else { - tcg_out32 (s, RLWINM - | RA (args[0]) - | RS (args[1]) - | SH (32 - args[2]) - | MB (0) - | ME (31) - ); - } - } - else { - tcg_out32 (s, SUBFIC | RT (0) | RA (args[2]) | 32); - tcg_out32 (s, RLWNM - | RA (args[0]) - | RS (args[1]) - | RB (0) - | MB (0) - | ME (31) - ); - } - break; - - case INDEX_op_add2_i32: - if (args[0] == args[3] || args[0] == args[5]) { - tcg_out32 (s, ADDC | TAB (0, args[2], args[4])); - tcg_out32 (s, ADDE | TAB (args[1], args[3], args[5])); - tcg_out_mov (s, TCG_TYPE_I32, args[0], 0); - } - else { - tcg_out32 (s, ADDC | TAB (args[0], args[2], args[4])); - tcg_out32 (s, ADDE | TAB (args[1], args[3], args[5])); - } - break; - case INDEX_op_sub2_i32: - if (args[0] == args[3] || args[0] == args[5]) { - tcg_out32 (s, SUBFC | TAB (0, args[4], args[2])); - tcg_out32 (s, SUBFE | TAB (args[1], args[5], args[3])); - tcg_out_mov (s, TCG_TYPE_I32, args[0], 0); - } - else { - tcg_out32 (s, SUBFC | TAB (args[0], args[4], args[2])); - tcg_out32 (s, SUBFE | TAB (args[1], args[5], args[3])); + tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31); + } else { + tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32)); + tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0) + | MB(0) | ME(31)); } break; case INDEX_op_brcond_i32: - /* - args[0] = r0 - args[1] = r1 - args[2] = cond - args[3] = r1 is const - args[4] = label_index - */ - tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3]); + tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], + args[3], TCG_TYPE_I32); + break; + case INDEX_op_brcond_i64: + tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], + args[3], TCG_TYPE_I64); break; case INDEX_op_brcond2_i32: tcg_out_brcond2(s, args, const_args); break; case INDEX_op_neg_i32: - tcg_out32 (s, NEG | RT (args[0]) | RA (args[1])); + case INDEX_op_neg_i64: + tcg_out32(s, NEG | RT(args[0]) | RA(args[1])); break; case INDEX_op_not_i32: - tcg_out32 (s, NOR | SAB (args[1], args[0], args[1])); + case INDEX_op_not_i64: + tcg_out32(s, NOR | SAB(args[1], args[0], args[1])); + break; + + case INDEX_op_add_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + do_addi_64: + tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2); + } else { + tcg_out32(s, ADD | TAB(a0, a1, a2)); + } + break; + case INDEX_op_sub_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[1]) { + if (const_args[2]) { + tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2); + } else { + tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); + } + } else if (const_args[2]) { + a2 = -a2; + goto do_addi_64; + } else { + tcg_out32(s, SUBF | TAB(a0, a2, a1)); + } + break; + + case INDEX_op_shl_i64: + if (const_args[2]) { + tcg_out_shli64(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SLD | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_shr_i64: + if (const_args[2]) { + tcg_out_shri64(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SRD | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_sar_i64: + if (const_args[2]) { + int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1); + tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh); + } else { + tcg_out32(s, SRAD | SAB(args[1], 
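PPC rotates only to the left, so the rotr cases above lower to a left rotate by (width - n): folded into the rlwinm/rldicl immediate when n is constant, or computed into r0 with subfic in the variable case. A C model of the 32-bit constant path (n in 1..31):

#include <stdint.h>

static uint32_t rotr32_model(uint32_t x, unsigned n)
{
    unsigned left = (32 - n) & 31;                 /* the rlwinm SH field */
    return (x << left) | (x >> ((32 - left) & 31));
}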
args[0], args[2])); + } + break; + case INDEX_op_rotl_i64: + if (const_args[2]) { + tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0); + } else { + tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0)); + } + break; + case INDEX_op_rotr_i64: + if (const_args[2]) { + tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0); + } else { + tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64)); + tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0)); + } + break; + + case INDEX_op_mul_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out32(s, MULLI | TAI(a0, a1, a2)); + } else { + tcg_out32(s, MULLD | TAB(a0, a1, a2)); + } + break; + case INDEX_op_div_i64: + tcg_out32(s, DIVD | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_divu_i64: + tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2])); break; case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, 0); + tcg_out_qemu_ld(s, args, false); break; case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, 1); + tcg_out_qemu_ld(s, args, true); break; case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args, 0); + tcg_out_qemu_st(s, args, false); break; case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args, 1); + tcg_out_qemu_st(s, args, true); break; case INDEX_op_ext8s_i32: - tcg_out32 (s, EXTSB | RS (args[1]) | RA (args[0])); - break; - case INDEX_op_ext8u_i32: - tcg_out32 (s, RLWINM - | RA (args[0]) - | RS (args[1]) - | SH (0) - | MB (24) - | ME (31) - ); - break; + case INDEX_op_ext8s_i64: + c = EXTSB; + goto gen_ext; case INDEX_op_ext16s_i32: - tcg_out32 (s, EXTSH | RS (args[1]) | RA (args[0])); - break; - case INDEX_op_ext16u_i32: - tcg_out32 (s, RLWINM - | RA (args[0]) - | RS (args[1]) - | SH (0) - | MB (16) - | ME (31) - ); + case INDEX_op_ext16s_i64: + c = EXTSH; + goto gen_ext; + case INDEX_op_ext32s_i64: + c = EXTSW; + goto gen_ext; + gen_ext: + tcg_out32(s, c | RS(args[1]) | RA(args[0])); break; case INDEX_op_setcond_i32: - tcg_out_setcond (s, args[3], args[0], args[1], args[2], const_args[2]); + tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], + const_args[2]); + break; + case INDEX_op_setcond_i64: + tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2], + const_args[2]); break; case INDEX_op_setcond2_i32: - tcg_out_setcond2 (s, args, const_args); + tcg_out_setcond2(s, args, const_args); break; case INDEX_op_bswap16_i32: - /* Stolen from gcc's builtin_bswap16 */ - + case INDEX_op_bswap16_i64: + a0 = args[0], a1 = args[1]; /* a1 = abcd */ - - /* r0 = (a1 << 8) & 0xff00 # 00d0 */ - tcg_out32 (s, RLWINM - | RA (0) - | RS (args[1]) - | SH (8) - | MB (16) - | ME (23) - ); - - /* a0 = rotate_left (a1, 24) & 0xff # 000c */ - tcg_out32 (s, RLWINM - | RA (args[0]) - | RS (args[1]) - | SH (24) - | MB (24) - | ME (31) - ); - - /* a0 = a0 | r0 # 00dc */ - tcg_out32 (s, OR | SAB (0, args[0], args[0])); + if (a0 != a1) { + /* a0 = (a1 r<< 24) & 0xff # 000c */ + tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); + /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */ + tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23); + } else { + /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */ + tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23); + /* a0 = (a1 r<< 24) & 0xff # 000c */ + tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); + /* a0 = a0 | r0 # 00dc */ + tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0)); + } break; case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: /* Stolen from gcc's builtin_bswap32 */ - { - int a0 = args[0]; - - /* a1 = args[1] # abcd */ + a1 = args[1]; + a0 = args[0] == a1 ? 
TCG_REG_R0 : args[0]; + + /* a1 = args[1] # abcd */ + /* a0 = rotate_left (a1, 8) # bcda */ + tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); + /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); + /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); + + if (a0 == TCG_REG_R0) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); + } + break; - if (a0 == args[1]) { - a0 = 0; - } + case INDEX_op_bswap64_i64: + a0 = args[0], a1 = args[1], a2 = TCG_REG_R0; + if (a0 == a1) { + a0 = TCG_REG_R0; + a2 = a1; + } - /* a0 = rotate_left (a1, 8) # bcda */ - tcg_out32 (s, RLWINM - | RA (a0) - | RS (args[1]) - | SH (8) - | MB (0) - | ME (31) - ); - - /* a0 = (a0 & ~0xff000000) | ((a1 << 24) & 0xff000000) # dcda */ - tcg_out32 (s, RLWIMI - | RA (a0) - | RS (args[1]) - | SH (24) - | MB (0) - | ME (7) - ); - - /* a0 = (a0 & ~0x0000ff00) | ((a1 << 24) & 0x0000ff00) # dcba */ - tcg_out32 (s, RLWIMI - | RA (a0) - | RS (args[1]) - | SH (24) - | MB (16) - | ME (23) - ); - - if (!a0) { - tcg_out_mov (s, TCG_TYPE_I32, args[0], a0); - } + /* a1 = # abcd efgh */ + /* a0 = rl32(a1, 8) # 0000 fghe */ + tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); + /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); + /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); + + /* a0 = rl64(a0, 32) # hgfe 0000 */ + /* a2 = rl64(a1, 32) # efgh abcd */ + tcg_out_rld(s, RLDICL, a0, a0, 32, 0); + tcg_out_rld(s, RLDICL, a2, a1, 32, 0); + + /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */ + tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31); + /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */ + tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7); + /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */ + tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23); + + if (a0 == 0) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); } break; case INDEX_op_deposit_i32: - tcg_out32 (s, RLWIMI - | RA (args[0]) - | RS (args[2]) - | SH (args[3]) - | MB (32 - args[3] - args[4]) - | ME (31 - args[3]) - ); + if (const_args[2]) { + uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3]; + tcg_out_andi32(s, args[0], args[0], ~mask); + } else { + tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3], + 32 - args[3] - args[4], 31 - args[3]); + } + break; + case INDEX_op_deposit_i64: + if (const_args[2]) { + uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3]; + tcg_out_andi64(s, args[0], args[0], ~mask); + } else { + tcg_out_rld(s, RLDIMI, args[0], args[2], args[3], + 64 - args[3] - args[4]); + } break; case INDEX_op_movcond_i32: - tcg_out_movcond (s, args[5], args[0], - args[1], args[2], - args[3], args[4], - const_args[2]); + tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2], + args[3], args[4], const_args[2]); + break; + case INDEX_op_movcond_i64: + tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2], + args[3], args[4], const_args[2]); + break; + +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_add2_i64: +#else + case INDEX_op_add2_i32: +#endif + /* Note that the CA bit is defined based on the word size of the + environment. So in 64-bit mode it's always carry-out of bit 63. + The fallback code using deposit works just as well for 32-bit. 
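The deposit cases build their field mask as ((2 << (len - 1)) - 1) << pos, written with 2 << (len - 1) rather than 1 << len so a full-width field does not overflow the shift. A C model of the insert itself; depositing a constant zero (the "rZ" constraint) degenerates to the ~mask AND emitted above:

#include <stdint.h>

static uint32_t deposit32_model(uint32_t dst, unsigned pos, unsigned len,
                                uint32_t val)
{
    uint32_t mask = ((2u << (len - 1)) - 1) << pos;  /* safe for len == 32 */
    return (dst & ~mask) | ((val << pos) & mask);
}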
*/ + a0 = args[0], a1 = args[1]; + if (a0 == args[3] || (!const_args[5] && a0 == args[5])) { + a0 = TCG_REG_R0; + } + if (const_args[4]) { + tcg_out32(s, ADDIC | TAI(a0, args[2], args[4])); + } else { + tcg_out32(s, ADDC | TAB(a0, args[2], args[4])); + } + if (const_args[5]) { + tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3])); + } else { + tcg_out32(s, ADDE | TAB(a1, args[3], args[5])); + } + if (a0 != args[0]) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); + } break; - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ - case INDEX_op_call: /* Always emitted via tcg_out_call. */ +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_sub2_i64: +#else + case INDEX_op_sub2_i32: +#endif + a0 = args[0], a1 = args[1]; + if (a0 == args[5] || (!const_args[3] && a0 == args[3])) { + a0 = TCG_REG_R0; + } + if (const_args[2]) { + tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2])); + } else { + tcg_out32(s, SUBFC | TAB(a0, args[4], args[2])); + } + if (const_args[3]) { + tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5])); + } else { + tcg_out32(s, SUBFE | TAB(a1, args[5], args[3])); + } + if (a0 != args[0]) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); + } + break; + + case INDEX_op_muluh_i32: + tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_mulsh_i32: + tcg_out32(s, MULHW | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_muluh_i64: + tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_mulsh_i64: + tcg_out32(s, MULHD | TAB(args[0], args[1], args[2])); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. 
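The add2 lowering pairs addc, which records the unsigned carry-out of the low-part addition in CA, with adde/addme/addze, which consume it; the note above about CA tracking the machine word size is why the identical pair serves add2_i64 on a 64-bit host. The 32-bit pair, modeled in C:

#include <stdint.h>

static void add2_model(uint32_t *rl, uint32_t *rh,
                       uint32_t al, uint32_t ah,
                       uint32_t bl, uint32_t bh)
{
    uint32_t lo = al + bl;     /* addc: CA = carry-out */
    uint32_t ca = lo < al;     /* recover CA in C */
    *rl = lo;
    *rh = ah + bh + ca;        /* adde: three-input add */
}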
*/ default: tcg_abort(); } @@ -1834,79 +2396,146 @@ static const TCGTargetOpDef ppc_op_defs[] = { { INDEX_op_ld16u_i32, { "r", "r" } }, { INDEX_op_ld16s_i32, { "r", "r" } }, { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_st8_i32, { "r", "r" } }, { INDEX_op_st16_i32, { "r", "r" } }, { INDEX_op_st_i32, { "r", "r" } }, { INDEX_op_add_i32, { "r", "r", "ri" } }, - { INDEX_op_mul_i32, { "r", "r", "ri" } }, + { INDEX_op_mul_i32, { "r", "r", "rI" } }, { INDEX_op_div_i32, { "r", "r", "r" } }, { INDEX_op_divu_i32, { "r", "r", "r" } }, - { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } }, - { INDEX_op_sub_i32, { "r", "r", "ri" } }, + { INDEX_op_sub_i32, { "r", "rI", "ri" } }, { INDEX_op_and_i32, { "r", "r", "ri" } }, { INDEX_op_or_i32, { "r", "r", "ri" } }, { INDEX_op_xor_i32, { "r", "r", "ri" } }, + { INDEX_op_andc_i32, { "r", "r", "ri" } }, + { INDEX_op_orc_i32, { "r", "r", "ri" } }, + { INDEX_op_eqv_i32, { "r", "r", "ri" } }, + { INDEX_op_nand_i32, { "r", "r", "r" } }, + { INDEX_op_nor_i32, { "r", "r", "r" } }, { INDEX_op_shl_i32, { "r", "r", "ri" } }, { INDEX_op_shr_i32, { "r", "r", "ri" } }, { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, { INDEX_op_rotr_i32, { "r", "r", "ri" } }, - { INDEX_op_brcond_i32, { "r", "ri" } }, - - { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } }, - { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } }, - { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } }, - { INDEX_op_neg_i32, { "r", "r" } }, { INDEX_op_not_i32, { "r", "r" } }, + { INDEX_op_ext8s_i32, { "r", "r" } }, + { INDEX_op_ext16s_i32, { "r", "r" } }, + { INDEX_op_bswap16_i32, { "r", "r" } }, + { INDEX_op_bswap32_i32, { "r", "r" } }, - { INDEX_op_andc_i32, { "r", "r", "r" } }, - { INDEX_op_orc_i32, { "r", "r", "r" } }, - { INDEX_op_eqv_i32, { "r", "r", "r" } }, - { INDEX_op_nand_i32, { "r", "r", "r" } }, - { INDEX_op_nor_i32, { "r", "r", "r" } }, - + { INDEX_op_brcond_i32, { "r", "ri" } }, { INDEX_op_setcond_i32, { "r", "r", "ri" } }, + { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } }, + + { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, + + { INDEX_op_muluh_i32, { "r", "r", "r" } }, + { INDEX_op_mulsh_i32, { "r", "r", "r" } }, + +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_ld8u_i64, { "r", "r" } }, + { INDEX_op_ld8s_i64, { "r", "r" } }, + { INDEX_op_ld16u_i64, { "r", "r" } }, + { INDEX_op_ld16s_i64, { "r", "r" } }, + { INDEX_op_ld32u_i64, { "r", "r" } }, + { INDEX_op_ld32s_i64, { "r", "r" } }, + { INDEX_op_ld_i64, { "r", "r" } }, + + { INDEX_op_st8_i64, { "r", "r" } }, + { INDEX_op_st16_i64, { "r", "r" } }, + { INDEX_op_st32_i64, { "r", "r" } }, + { INDEX_op_st_i64, { "r", "r" } }, + + { INDEX_op_add_i64, { "r", "r", "rT" } }, + { INDEX_op_sub_i64, { "r", "rI", "rT" } }, + { INDEX_op_and_i64, { "r", "r", "ri" } }, + { INDEX_op_or_i64, { "r", "r", "rU" } }, + { INDEX_op_xor_i64, { "r", "r", "rU" } }, + { INDEX_op_andc_i64, { "r", "r", "ri" } }, + { INDEX_op_orc_i64, { "r", "r", "r" } }, + { INDEX_op_eqv_i64, { "r", "r", "r" } }, + { INDEX_op_nand_i64, { "r", "r", "r" } }, + { INDEX_op_nor_i64, { "r", "r", "r" } }, + + { INDEX_op_shl_i64, { "r", "r", "ri" } }, + { INDEX_op_shr_i64, { "r", "r", "ri" } }, + { INDEX_op_sar_i64, { "r", "r", "ri" } }, + { INDEX_op_rotl_i64, { "r", "r", "ri" } }, + { INDEX_op_rotr_i64, { "r", "r", "ri" } }, + + { INDEX_op_mul_i64, { "r", "r", "rI" } }, + { INDEX_op_div_i64, { "r", "r", "r" } }, + { INDEX_op_divu_i64, { "r", "r", "r" } }, + + { INDEX_op_neg_i64, { "r", "r" } }, + { INDEX_op_not_i64, { "r", "r" } }, + { INDEX_op_ext8s_i64, 
{ "r", "r" } }, + { INDEX_op_ext16s_i64, { "r", "r" } }, + { INDEX_op_ext32s_i64, { "r", "r" } }, + { INDEX_op_bswap16_i64, { "r", "r" } }, + { INDEX_op_bswap32_i64, { "r", "r" } }, + { INDEX_op_bswap64_i64, { "r", "r" } }, + + { INDEX_op_brcond_i64, { "r", "ri" } }, + { INDEX_op_setcond_i64, { "r", "r", "ri" } }, + { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } }, + + { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, + + { INDEX_op_mulsh_i64, { "r", "r", "r" } }, + { INDEX_op_muluh_i64, { "r", "r", "r" } }, +#endif + +#if TCG_TARGET_REG_BITS == 32 + { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } }, +#endif - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } }, + { INDEX_op_sub2_i64, { "r", "r", "rI", "rZM", "r", "r" } }, +#else + { INDEX_op_add2_i32, { "r", "r", "r", "r", "rI", "rZM" } }, + { INDEX_op_sub2_i32, { "r", "r", "rI", "rZM", "r", "r" } }, +#endif -#if TARGET_LONG_BITS == 32 +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "S", "S" } }, + { INDEX_op_qemu_ld_i64, { "r", "L" } }, + { INDEX_op_qemu_st_i64, { "S", "S" } }, +#elif TARGET_LONG_BITS == 32 { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "S", "S" } }, { INDEX_op_qemu_ld_i64, { "L", "L", "L" } }, - { INDEX_op_qemu_st_i32, { "K", "K" } }, - { INDEX_op_qemu_st_i64, { "M", "M", "M" } }, + { INDEX_op_qemu_st_i64, { "S", "S", "S" } }, #else { INDEX_op_qemu_ld_i32, { "r", "L", "L" } }, + { INDEX_op_qemu_st_i32, { "S", "S", "S" } }, { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } }, - { INDEX_op_qemu_st_i32, { "K", "K", "K" } }, - { INDEX_op_qemu_st_i64, { "M", "M", "M", "M" } }, + { INDEX_op_qemu_st_i64, { "S", "S", "S", "S" } }, #endif - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext8u_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext16u_i32, { "r", "r" } }, - - { INDEX_op_deposit_i32, { "r", "0", "r" } }, - { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "r" } }, - { -1 }, }; static void tcg_target_init(TCGContext *s) { + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + if (hwcap & PPC_FEATURE_ARCH_2_06) { + have_isa_2_06 = true; + } + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); tcg_regset_set32(tcg_target_call_clobber_regs, 0, (1 << TCG_REG_R0) | -#ifdef TCG_TARGET_CALL_DARWIN (1 << TCG_REG_R2) | -#endif (1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | @@ -1916,18 +2545,173 @@ static void tcg_target_init(TCGContext *s) (1 << TCG_REG_R9) | (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | - (1 << TCG_REG_R12) - ); + (1 << TCG_REG_R12)); tcg_regset_clear(s->reserved_regs); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); -#ifndef TCG_TARGET_CALL_DARWIN - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */ +#if defined(_CALL_SYSV) + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */ #endif -#ifdef _CALL_SYSV - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); +#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64 + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ #endif + 
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */ + if (USE_REG_RA) { + tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return addr */ + } tcg_add_target_add_op_defs(ppc_op_defs); } + +#ifdef __ELF__ +typedef struct { + DebugFrameCIE cie; + DebugFrameFDEHeader fde; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3]; +} DebugFrame; + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + +#if TCG_TARGET_REG_BITS == 64 +# define ELF_HOST_MACHINE EM_PPC64 +#else +# define ELF_HOST_MACHINE EM_PPC +#endif + +static DebugFrame debug_frame = { + .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .cie.id = -1, + .cie.version = 1, + .cie.code_align = 1, + .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */ + .cie.return_column = 65, + + /* Total FDE size does not include the "len" member. */ + .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_R1, /* DW_CFA_def_cfa r1, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */ + 0x11, 65, (LR_OFFSET / -SZR) & 0x7f, + } +}; + +void tcg_register_jit(void *buf, size_t buf_size) +{ + uint8_t *p = &debug_frame.fde_reg_ofs[3]; + int i; + + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) { + p[0] = 0x80 + tcg_target_callee_save_regs[i]; + p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR; + } + + debug_frame.fde.func_start = (uintptr_t)buf; + debug_frame.fde.func_len = buf_size; + + tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); +} +#endif /* __ELF__ */ + +static size_t dcache_bsize = 16; +static size_t icache_bsize = 16; + +void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + uintptr_t p, start1, stop1; + size_t dsize = dcache_bsize; + size_t isize = icache_bsize; + + start1 = start & ~(dsize - 1); + stop1 = (stop + dsize - 1) & ~(dsize - 1); + for (p = start1; p < stop1; p += dsize) { + asm volatile ("dcbst 0,%0" : : "r"(p) : "memory"); + } + asm volatile ("sync" : : : "memory"); + + start &= start & ~(isize - 1); + stop1 = (stop + isize - 1) & ~(isize - 1); + for (p = start1; p < stop1; p += isize) { + asm volatile ("icbi 0,%0" : : "r"(p) : "memory"); + } + asm volatile ("sync" : : : "memory"); + asm volatile ("isync" : : : "memory"); +} + +#if defined _AIX +#include <sys/systemcfg.h> + +static void __attribute__((constructor)) tcg_cache_init(void) +{ + icache_bsize = _system_configuration.icache_line; + dcache_bsize = _system_configuration.dcache_line; +} + +#elif defined __linux__ +static void __attribute__((constructor)) tcg_cache_init(void) +{ + unsigned long dsize = qemu_getauxval(AT_DCACHEBSIZE); + unsigned long isize = qemu_getauxval(AT_ICACHEBSIZE); + + if (dsize == 0 || isize == 0) { + if (dsize == 0) { + fprintf(stderr, "getauxval AT_DCACHEBSIZE failed\n"); + } + if (isize == 0) { + fprintf(stderr, "getauxval AT_ICACHEBSIZE failed\n"); + } + exit(1); + } + dcache_bsize = dsize; + icache_bsize = isize; +} + +#elif defined __APPLE__ +#include <stdio.h> +#include <sys/types.h> +#include <sys/sysctl.h> + +static void __attribute__((constructor)) tcg_cache_init(void) +{ + size_t len; + unsigned cacheline; + int name[2] = { CTL_HW, HW_CACHELINE }; + + len = sizeof(cacheline); + if (sysctl(name, 2, &cacheline, &len, NULL, 0)) { + perror("sysctl CTL_HW HW_CACHELINE failed"); + exit(1); + } + dcache_bsize = 
cacheline; + icache_bsize = cacheline; +} + +#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/sysctl.h> + +static void __attribute__((constructor)) tcg_cache_init(void) +{ + size_t len = 4; + unsigned cacheline; + + if (sysctlbyname ("machdep.cacheline_size", &cacheline, &len, NULL, 0)) { + fprintf(stderr, "sysctlbyname machdep.cacheline_size failed: %s\n", + strerror(errno)); + exit(1); + } + dcache_bsize = cacheline; + icache_bsize = cacheline; +} +#endif diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 05069ae2d8..32ac4424db 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -21,60 +21,35 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#ifndef TCG_TARGET_PPC -#define TCG_TARGET_PPC 1 +#ifndef TCG_TARGET_PPC64 +#define TCG_TARGET_PPC64 1 + +#ifdef _ARCH_PPC64 +# define TCG_TARGET_REG_BITS 64 +#else +# define TCG_TARGET_REG_BITS 32 +#endif #define TCG_TARGET_NB_REGS 32 #define TCG_TARGET_INSN_UNIT_SIZE 4 typedef enum { - TCG_REG_R0 = 0, - TCG_REG_R1, - TCG_REG_R2, - TCG_REG_R3, - TCG_REG_R4, - TCG_REG_R5, - TCG_REG_R6, - TCG_REG_R7, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10, - TCG_REG_R11, - TCG_REG_R12, - TCG_REG_R13, - TCG_REG_R14, - TCG_REG_R15, - TCG_REG_R16, - TCG_REG_R17, - TCG_REG_R18, - TCG_REG_R19, - TCG_REG_R20, - TCG_REG_R21, - TCG_REG_R22, - TCG_REG_R23, - TCG_REG_R24, - TCG_REG_R25, - TCG_REG_R26, - TCG_REG_R27, - TCG_REG_R28, - TCG_REG_R29, - TCG_REG_R30, - TCG_REG_R31 + TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, + TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, + TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, + TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, + TCG_REG_R16, TCG_REG_R17, TCG_REG_R18, TCG_REG_R19, + TCG_REG_R20, TCG_REG_R21, TCG_REG_R22, TCG_REG_R23, + TCG_REG_R24, TCG_REG_R25, TCG_REG_R26, TCG_REG_R27, + TCG_REG_R28, TCG_REG_R29, TCG_REG_R30, TCG_REG_R31, + + TCG_REG_CALL_STACK = TCG_REG_R1, + TCG_AREG0 = TCG_REG_R27 } TCGReg; -/* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_R1 -#define TCG_TARGET_STACK_ALIGN 16 -#if defined _CALL_DARWIN || defined __APPLE__ -#define TCG_TARGET_CALL_STACK_OFFSET 24 -#elif defined _CALL_AIX -#define TCG_TARGET_CALL_STACK_OFFSET 52 -#elif defined _CALL_SYSV -#define TCG_TARGET_CALL_ALIGN_ARGS 1 -#define TCG_TARGET_CALL_STACK_OFFSET 8 -#else -#error Unsupported system -#endif +/* optional instructions automatically implemented */ +#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */ +#define TCG_TARGET_HAS_ext16u_i32 0 /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 @@ -82,8 +57,6 @@ typedef enum { #define TCG_TARGET_HAS_rot_i32 1 #define TCG_TARGET_HAS_ext8s_i32 1 #define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 #define TCG_TARGET_HAS_bswap16_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 #define TCG_TARGET_HAS_not_i32 1 @@ -95,15 +68,44 @@ typedef enum { #define TCG_TARGET_HAS_nor_i32 1 #define TCG_TARGET_HAS_deposit_i32 1 #define TCG_TARGET_HAS_movcond_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 0 #define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 +#define TCG_TARGET_HAS_muluh_i32 1 +#define TCG_TARGET_HAS_mulsh_i32 1 -#define TCG_AREG0 TCG_REG_R27 +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_add2_i32 0 +#define 
TCG_TARGET_HAS_sub2_i32 0 +#define TCG_TARGET_HAS_trunc_shr_i32 0 +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 0 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 0 +#define TCG_TARGET_HAS_ext16u_i64 0 +#define TCG_TARGET_HAS_ext32u_i64 0 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_orc_i64 1 +#define TCG_TARGET_HAS_eqv_i64 1 +#define TCG_TARGET_HAS_nand_i64 1 +#define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 1 +#define TCG_TARGET_HAS_mulsh_i64 1 +#endif -#define tcg_qemu_tb_exec(env, tb_ptr) \ - ((uintptr_t __attribute__ ((longcall)) \ - (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr) +void flush_icache_range(uintptr_t start, uintptr_t stop); #endif diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c deleted file mode 100644 index c90ddcd03a..0000000000 --- a/tcg/ppc64/tcg-target.c +++ /dev/null @@ -1,2186 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include "tcg-be-ldst.h" - -#define TCG_CT_CONST_S16 0x100 -#define TCG_CT_CONST_U16 0x200 -#define TCG_CT_CONST_S32 0x400 -#define TCG_CT_CONST_U32 0x800 -#define TCG_CT_CONST_ZERO 0x1000 -#define TCG_CT_CONST_MONE 0x2000 - -static tcg_insn_unit *tb_ret_addr; - -#if TARGET_LONG_BITS == 32 -#define LD_ADDR LWZ -#define CMP_L 0 -#else -#define LD_ADDR LD -#define CMP_L (1<<21) -#endif - -#ifndef GUEST_BASE -#define GUEST_BASE 0 -#endif - -#include "elf.h" -static bool have_isa_2_06; -#define HAVE_ISA_2_06 have_isa_2_06 -#define HAVE_ISEL have_isa_2_06 - -#ifdef CONFIG_USE_GUEST_BASE -#define TCG_GUEST_BASE_REG 30 -#else -#define TCG_GUEST_BASE_REG 0 -#endif - -#ifndef NDEBUG -static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "r0", - "r1", - "r2", - "r3", - "r4", - "r5", - "r6", - "r7", - "r8", - "r9", - "r10", - "r11", - "r12", - "r13", - "r14", - "r15", - "r16", - "r17", - "r18", - "r19", - "r20", - "r21", - "r22", - "r23", - "r24", - "r25", - "r26", - "r27", - "r28", - "r29", - "r30", - "r31" -}; -#endif - -static const int tcg_target_reg_alloc_order[] = { - TCG_REG_R14, /* call saved registers */ - TCG_REG_R15, - TCG_REG_R16, - TCG_REG_R17, - TCG_REG_R18, - TCG_REG_R19, - TCG_REG_R20, - TCG_REG_R21, - TCG_REG_R22, - TCG_REG_R23, - TCG_REG_R24, - TCG_REG_R25, - TCG_REG_R26, - TCG_REG_R27, - TCG_REG_R28, - TCG_REG_R29, - TCG_REG_R30, - TCG_REG_R31, - TCG_REG_R12, /* call clobbered, non-arguments */ - TCG_REG_R11, - TCG_REG_R10, /* call clobbered, arguments */ - TCG_REG_R9, - TCG_REG_R8, - TCG_REG_R7, - TCG_REG_R6, - TCG_REG_R5, - TCG_REG_R4, - TCG_REG_R3, -}; - -static const int tcg_target_call_iarg_regs[] = { - TCG_REG_R3, - TCG_REG_R4, - TCG_REG_R5, - TCG_REG_R6, - TCG_REG_R7, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10 -}; - -static const int tcg_target_call_oarg_regs[] = { - TCG_REG_R3 -}; - -static const int tcg_target_callee_save_regs[] = { -#ifdef __APPLE__ - TCG_REG_R11, -#endif - TCG_REG_R14, - TCG_REG_R15, - TCG_REG_R16, - TCG_REG_R17, - TCG_REG_R18, - TCG_REG_R19, - TCG_REG_R20, - TCG_REG_R21, - TCG_REG_R22, - TCG_REG_R23, - TCG_REG_R24, - TCG_REG_R25, - TCG_REG_R26, - TCG_REG_R27, /* currently used for the global env */ - TCG_REG_R28, - TCG_REG_R29, - TCG_REG_R30, - TCG_REG_R31 -}; - -static inline bool in_range_b(tcg_target_long target) -{ - return target == sextract64(target, 0, 26); -} - -static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); - assert(in_range_b(disp)); - return disp & 0x3fffffc; -} - -static void reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - *pc = (*pc & ~0x3fffffc) | reloc_pc24_val(pc, target); -} - -static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); - assert(disp == (int16_t) disp); - return disp & 0xfffc; -} - -static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target); -} - -static inline void tcg_out_b_noaddr(TCGContext *s, int insn) -{ - unsigned retrans = *s->code_ptr & 0x3fffffc; - tcg_out32(s, insn | retrans); -} - -static inline void tcg_out_bc_noaddr(TCGContext *s, int insn) -{ - unsigned retrans = *s->code_ptr & 0xfffc; - tcg_out32(s, insn | retrans); -} - -static void patch_reloc(tcg_insn_unit *code_ptr, int type, - intptr_t value, intptr_t addend) -{ - tcg_insn_unit *target = (tcg_insn_unit *)value; - - assert(addend == 0); - switch (type) { - case R_PPC_REL14: - reloc_pc14(code_ptr, target); - 
break; - case R_PPC_REL24: - reloc_pc24(code_ptr, target); - break; - default: - tcg_abort(); - } -} - -/* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) -{ - const char *ct_str; - - ct_str = *pct_str; - switch (ct_str[0]) { - case 'A': case 'B': case 'C': case 'D': - ct->ct |= TCG_CT_REG; - tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A'); - break; - case 'r': - ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); - break; - case 'L': /* qemu_ld constraint */ - ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); -#ifdef CONFIG_SOFTMMU - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); -#endif - break; - case 'S': /* qemu_st constraint */ - ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); -#ifdef CONFIG_SOFTMMU - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6); -#endif - break; - case 'I': - ct->ct |= TCG_CT_CONST_S16; - break; - case 'J': - ct->ct |= TCG_CT_CONST_U16; - break; - case 'M': - ct->ct |= TCG_CT_CONST_MONE; - break; - case 'T': - ct->ct |= TCG_CT_CONST_S32; - break; - case 'U': - ct->ct |= TCG_CT_CONST_U32; - break; - case 'Z': - ct->ct |= TCG_CT_CONST_ZERO; - break; - default: - return -1; - } - ct_str++; - *pct_str = ct_str; - return 0; -} - -/* test if a constant matches the constraint */ -static int tcg_target_const_match(tcg_target_long val, TCGType type, - const TCGArgConstraint *arg_ct) -{ - int ct = arg_ct->ct; - if (ct & TCG_CT_CONST) { - return 1; - } - - /* The only 32-bit constraint we use aside from - TCG_CT_CONST is TCG_CT_CONST_S16. 
*/ - if (type == TCG_TYPE_I32) { - val = (int32_t)val; - } - - if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { - return 1; - } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { - return 1; - } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { - return 1; - } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { - return 1; - } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return 1; - } else if ((ct & TCG_CT_CONST_MONE) && val == -1) { - return 1; - } - return 0; -} - -#define OPCD(opc) ((opc)<<26) -#define XO19(opc) (OPCD(19)|((opc)<<1)) -#define MD30(opc) (OPCD(30)|((opc)<<2)) -#define MDS30(opc) (OPCD(30)|((opc)<<1)) -#define XO31(opc) (OPCD(31)|((opc)<<1)) -#define XO58(opc) (OPCD(58)|(opc)) -#define XO62(opc) (OPCD(62)|(opc)) - -#define B OPCD( 18) -#define BC OPCD( 16) -#define LBZ OPCD( 34) -#define LHZ OPCD( 40) -#define LHA OPCD( 42) -#define LWZ OPCD( 32) -#define STB OPCD( 38) -#define STH OPCD( 44) -#define STW OPCD( 36) - -#define STD XO62( 0) -#define STDU XO62( 1) -#define STDX XO31(149) - -#define LD XO58( 0) -#define LDX XO31( 21) -#define LDU XO58( 1) -#define LWA XO58( 2) -#define LWAX XO31(341) - -#define ADDIC OPCD( 12) -#define ADDI OPCD( 14) -#define ADDIS OPCD( 15) -#define ORI OPCD( 24) -#define ORIS OPCD( 25) -#define XORI OPCD( 26) -#define XORIS OPCD( 27) -#define ANDI OPCD( 28) -#define ANDIS OPCD( 29) -#define MULLI OPCD( 7) -#define CMPLI OPCD( 10) -#define CMPI OPCD( 11) -#define SUBFIC OPCD( 8) - -#define LWZU OPCD( 33) -#define STWU OPCD( 37) - -#define RLWIMI OPCD( 20) -#define RLWINM OPCD( 21) -#define RLWNM OPCD( 23) - -#define RLDICL MD30( 0) -#define RLDICR MD30( 1) -#define RLDIMI MD30( 3) -#define RLDCL MDS30( 8) - -#define BCLR XO19( 16) -#define BCCTR XO19(528) -#define CRAND XO19(257) -#define CRANDC XO19(129) -#define CRNAND XO19(225) -#define CROR XO19(449) -#define CRNOR XO19( 33) - -#define EXTSB XO31(954) -#define EXTSH XO31(922) -#define EXTSW XO31(986) -#define ADD XO31(266) -#define ADDE XO31(138) -#define ADDME XO31(234) -#define ADDZE XO31(202) -#define ADDC XO31( 10) -#define AND XO31( 28) -#define SUBF XO31( 40) -#define SUBFC XO31( 8) -#define SUBFE XO31(136) -#define SUBFME XO31(232) -#define SUBFZE XO31(200) -#define OR XO31(444) -#define XOR XO31(316) -#define MULLW XO31(235) -#define MULHWU XO31( 11) -#define DIVW XO31(491) -#define DIVWU XO31(459) -#define CMP XO31( 0) -#define CMPL XO31( 32) -#define LHBRX XO31(790) -#define LWBRX XO31(534) -#define LDBRX XO31(532) -#define STHBRX XO31(918) -#define STWBRX XO31(662) -#define STDBRX XO31(660) -#define MFSPR XO31(339) -#define MTSPR XO31(467) -#define SRAWI XO31(824) -#define NEG XO31(104) -#define MFCR XO31( 19) -#define MFOCRF (MFCR | (1u << 20)) -#define NOR XO31(124) -#define CNTLZW XO31( 26) -#define CNTLZD XO31( 58) -#define ANDC XO31( 60) -#define ORC XO31(412) -#define EQV XO31(284) -#define NAND XO31(476) -#define ISEL XO31( 15) - -#define MULLD XO31(233) -#define MULHD XO31( 73) -#define MULHDU XO31( 9) -#define DIVD XO31(489) -#define DIVDU XO31(457) - -#define LBZX XO31( 87) -#define LHZX XO31(279) -#define LHAX XO31(343) -#define LWZX XO31( 23) -#define STBX XO31(215) -#define STHX XO31(407) -#define STWX XO31(151) - -#define SPR(a, b) ((((a)<<5)|(b))<<11) -#define LR SPR(8, 0) -#define CTR SPR(9, 0) - -#define SLW XO31( 24) -#define SRW XO31(536) -#define SRAW XO31(792) - -#define SLD XO31( 27) -#define SRD XO31(539) -#define SRAD XO31(794) -#define SRADI XO31(413<<1) - -#define TW XO31( 4) -#define TRAP (TW | TO(31)) - -#define 
RT(r) ((r)<<21) -#define RS(r) ((r)<<21) -#define RA(r) ((r)<<16) -#define RB(r) ((r)<<11) -#define TO(t) ((t)<<21) -#define SH(s) ((s)<<11) -#define MB(b) ((b)<<6) -#define ME(e) ((e)<<1) -#define BO(o) ((o)<<21) -#define MB64(b) ((b)<<5) -#define FXM(b) (1 << (19 - (b))) - -#define LK 1 - -#define TAB(t, a, b) (RT(t) | RA(a) | RB(b)) -#define SAB(s, a, b) (RS(s) | RA(a) | RB(b)) -#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff)) -#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff)) - -#define BF(n) ((n)<<23) -#define BI(n, c) (((c)+((n)*4))<<16) -#define BT(n, c) (((c)+((n)*4))<<21) -#define BA(n, c) (((c)+((n)*4))<<16) -#define BB(n, c) (((c)+((n)*4))<<11) -#define BC_(n, c) (((c)+((n)*4))<<6) - -#define BO_COND_TRUE BO(12) -#define BO_COND_FALSE BO( 4) -#define BO_ALWAYS BO(20) - -enum { - CR_LT, - CR_GT, - CR_EQ, - CR_SO -}; - -static const uint32_t tcg_to_bc[] = { - [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE, - [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE, - [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE, - [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE, - [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE, - [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE, - [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE, - [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE, - [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE, - [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE, -}; - -/* The low bit here is set if the RA and RB fields must be inverted. */ -static const uint32_t tcg_to_isel[] = { - [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ), - [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1, - [TCG_COND_LT] = ISEL | BC_(7, CR_LT), - [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1, - [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1, - [TCG_COND_GT] = ISEL | BC_(7, CR_GT), - [TCG_COND_LTU] = ISEL | BC_(7, CR_LT), - [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1, - [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1, - [TCG_COND_GTU] = ISEL | BC_(7, CR_GT), -}; - -static inline void tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) -{ - if (ret != arg) { - tcg_out32(s, OR | SAB(arg, ret, arg)); - } -} - -static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, - int sh, int mb) -{ - sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1); - mb = MB64((mb >> 5) | ((mb << 1) & 0x3f)); - tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb); -} - -static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs, - int sh, int mb, int me) -{ - tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me)); -} - -static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src) -{ - tcg_out_rld(s, RLDICL, dst, src, 0, 32); -} - -static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c) -{ - tcg_out_rld(s, RLDICR, dst, src, c, 63 - c); -} - -static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c) -{ - tcg_out_rld(s, RLDICL, dst, src, 64 - c, c); -} - -static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg) -{ - if (arg == (int16_t) arg) { - tcg_out32(s, ADDI | TAI(ret, 0, arg)); - } else { - tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16)); - if (arg & 0xffff) { - tcg_out32(s, ORI | SAI(ret, ret, arg)); - } - } -} - -static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, - tcg_target_long arg) -{ - if (type == TCG_TYPE_I32 || arg == (int32_t)arg) { - tcg_out_movi32(s, ret, arg); - } else if (arg == (uint32_t)arg && !(arg & 0x8000)) { - tcg_out32(s, ADDI | TAI(ret, 0, arg)); - tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); - } else { 
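/* Worked example (illustrative, not part of the patch): for
   arg = 0x123456789abcdef0 the path below emits five insns:
   lis r,0x1234; ori r,r,0x5678; sldi r,r,32; oris r,r,0x9abc;
   ori r,r,0xdef0. */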
- int32_t high = arg >> 32; - tcg_out_movi32(s, ret, high); - if (high) { - tcg_out_shli64(s, ret, ret, 32); - } - if (arg & 0xffff0000) { - tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); - } - if (arg & 0xffff) { - tcg_out32(s, ORI | SAI(ret, ret, arg)); - } - } -} - -static bool mask_operand(uint32_t c, int *mb, int *me) -{ - uint32_t lsb, test; - - /* Accept a bit pattern like: - 0....01....1 - 1....10....0 - 0..01..10..0 - Keep track of the transitions. */ - if (c == 0 || c == -1) { - return false; - } - test = c; - lsb = test & -test; - test += lsb; - if (test & (test - 1)) { - return false; - } - - *me = clz32(lsb); - *mb = test ? clz32(test & -test) + 1 : 0; - return true; -} - -static bool mask64_operand(uint64_t c, int *mb, int *me) -{ - uint64_t lsb; - - if (c == 0) { - return false; - } - - lsb = c & -c; - /* Accept 1..10..0. */ - if (c == -lsb) { - *mb = 0; - *me = clz64(lsb); - return true; - } - /* Accept 0..01..1. */ - if (lsb == 1 && (c & (c + 1)) == 0) { - *mb = clz64(c + 1) + 1; - *me = 63; - return true; - } - return false; -} - -static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c) -{ - int mb, me; - - if ((c & 0xffff) == c) { - tcg_out32(s, ANDI | SAI(src, dst, c)); - return; - } else if ((c & 0xffff0000) == c) { - tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); - return; - } else if (mask_operand(c, &mb, &me)) { - tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me); - } else { - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c); - tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0)); - } -} - -static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c) -{ - int mb, me; - - if ((c & 0xffff) == c) { - tcg_out32(s, ANDI | SAI(src, dst, c)); - return; - } else if ((c & 0xffff0000) == c) { - tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); - return; - } else if (mask64_operand(c, &mb, &me)) { - if (mb == 0) { - tcg_out_rld(s, RLDICR, dst, src, 0, me); - } else { - tcg_out_rld(s, RLDICL, dst, src, 0, mb); - } - } else { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c); - tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0)); - } -} - -static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c, - int op_lo, int op_hi) -{ - if (c >> 16) { - tcg_out32(s, op_hi | SAI(src, dst, c >> 16)); - src = dst; - } - if (c & 0xffff) { - tcg_out32(s, op_lo | SAI(src, dst, c)); - src = dst; - } -} - -static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c) -{ - tcg_out_zori32(s, dst, src, c, ORI, ORIS); -} - -static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c) -{ - tcg_out_zori32(s, dst, src, c, XORI, XORIS); -} - -static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target) -{ - ptrdiff_t disp = tcg_pcrel_diff(s, target); - if (in_range_b(disp)) { - tcg_out32(s, B | (disp & 0x3fffffc) | mask); - } else { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, (uintptr_t)target); - tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR); - tcg_out32(s, BCCTR | BO_ALWAYS | mask); - } -} - -static void tcg_out_call(TCGContext *s, tcg_insn_unit *target) -{ -#ifdef __APPLE__ - tcg_out_b(s, LK, target); -#else - /* Look through the descriptor. If the branch is in range, and we - don't have to spend too much effort on building the toc. 
*/ - void *tgt = ((void **)target)[0]; - uintptr_t toc = ((uintptr_t *)target)[1]; - intptr_t diff = tcg_pcrel_diff(s, tgt); - - if (in_range_b(diff) && toc == (uint32_t)toc) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, toc); - tcg_out_b(s, LK, tgt); - } else { - /* Fold the low bits of the constant into the addresses below. */ - intptr_t arg = (intptr_t)target; - int ofs = (int16_t)arg; - - if (ofs + 8 < 0x8000) { - arg -= ofs; - } else { - ofs = 0; - } - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, arg); - tcg_out32(s, LD | TAI(TCG_REG_R0, TCG_REG_R2, ofs)); - tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR); - tcg_out32(s, LD | TAI(TCG_REG_R2, TCG_REG_R2, ofs + 8)); - tcg_out32(s, BCCTR | BO_ALWAYS | LK); - } -#endif -} - -static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, - TCGReg base, tcg_target_long offset) -{ - tcg_target_long orig = offset, l0, l1, extra = 0, align = 0; - TCGReg rs = TCG_REG_R2; - - assert(rt != TCG_REG_R2 && base != TCG_REG_R2); - - switch (opi) { - case LD: case LWA: - align = 3; - /* FALLTHRU */ - default: - if (rt != TCG_REG_R0) { - rs = rt; - } - break; - case STD: - align = 3; - break; - case STB: case STH: case STW: - break; - } - - /* For unaligned, or very large offsets, use the indexed form. */ - if (offset & align || offset != (int32_t)offset) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, orig); - tcg_out32(s, opx | TAB(rt, base, TCG_REG_R2)); - return; - } - - l0 = (int16_t)offset; - offset = (offset - l0) >> 16; - l1 = (int16_t)offset; - - if (l1 < 0 && orig >= 0) { - extra = 0x4000; - l1 = (int16_t)(offset - 0x4000); - } - if (l1) { - tcg_out32(s, ADDIS | TAI(rs, base, l1)); - base = rs; - } - if (extra) { - tcg_out32(s, ADDIS | TAI(rs, base, extra)); - base = rs; - } - if (opi != ADDI || base != rt || l0 != 0) { - tcg_out32(s, opi | TAI(rt, base, l0)); - } -} - -static const uint32_t qemu_ldx_opc[16] = { - [MO_UB] = LBZX, - [MO_UW] = LHZX, - [MO_UL] = LWZX, - [MO_Q] = LDX, - [MO_SW] = LHAX, - [MO_SL] = LWAX, - [MO_BSWAP | MO_UB] = LBZX, - [MO_BSWAP | MO_UW] = LHBRX, - [MO_BSWAP | MO_UL] = LWBRX, - [MO_BSWAP | MO_Q] = LDBRX, -}; - -static const uint32_t qemu_stx_opc[16] = { - [MO_UB] = STBX, - [MO_UW] = STHX, - [MO_UL] = STWX, - [MO_Q] = STDX, - [MO_BSWAP | MO_UB] = STBX, - [MO_BSWAP | MO_UW] = STHBRX, - [MO_BSWAP | MO_UL] = STWBRX, - [MO_BSWAP | MO_Q] = STDBRX, -}; - -static const uint32_t qemu_exts_opc[4] = { - EXTSB, EXTSH, EXTSW, 0 -}; - -#if defined (CONFIG_SOFTMMU) -/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[16] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, -}; - -/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[16] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, -}; - -/* Perform the TLB load and compare. Places the result of the comparison - in CR7, loads the addend of the TLB into R3, and returns the register - containing the guest address (zero-extended into R4). Clobbers R0 and R2. 
*/ - -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits, TCGReg addr_reg, - int mem_index, bool is_read) -{ - int cmp_off - = (is_read - ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) - : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write)); - int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend); - TCGReg base = TCG_AREG0; - - /* Extract the page index, shifted into place for tlb index. */ - if (TARGET_LONG_BITS == 32) { - /* Zero-extend the address into a place helpful for further use. */ - tcg_out_ext32u(s, TCG_REG_R4, addr_reg); - addr_reg = TCG_REG_R4; - } else { - tcg_out_rld(s, RLDICL, TCG_REG_R3, addr_reg, - 64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS); - } - - /* Compensate for very large offsets. */ - if (add_off >= 0x8000) { - /* Most target env are smaller than 32k; none are larger than 64k. - Simplify the logic here merely to offset by 0x7ff0, giving us a - range just shy of 64k. Check this assumption. */ - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, - tlb_table[NB_MMU_MODES - 1][1]) - > 0x7ff0 + 0x7fff); - tcg_out32(s, ADDI | TAI(TCG_REG_R2, base, 0x7ff0)); - base = TCG_REG_R2; - cmp_off -= 0x7ff0; - add_off -= 0x7ff0; - } - - /* Extraction and shifting, part 2. */ - if (TARGET_LONG_BITS == 32) { - tcg_out_rlw(s, RLWINM, TCG_REG_R3, addr_reg, - 32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), - 32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS), - 31 - CPU_TLB_ENTRY_BITS); - } else { - tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS); - } - - tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base)); - - /* Load the tlb comparator. */ - tcg_out32(s, LD_ADDR | TAI(TCG_REG_R2, TCG_REG_R3, cmp_off)); - - /* Load the TLB addend for use on the fast path. Do this asap - to minimize any load use delay. */ - tcg_out32(s, LD | TAI(TCG_REG_R3, TCG_REG_R3, add_off)); - - /* Clear the non-page, non-alignment bits from the address. */ - if (TARGET_LONG_BITS == 32) { - tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr_reg, 0, - (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS); - } else if (!s_bits) { - tcg_out_rld(s, RLDICR, TCG_REG_R0, addr_reg, 0, 63 - TARGET_PAGE_BITS); - } else { - tcg_out_rld(s, RLDICL, TCG_REG_R0, addr_reg, - 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits); - tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); - } - - tcg_out32(s, CMP | BF(7) | RA(TCG_REG_R0) | RB(TCG_REG_R2) | CMP_L); - - return addr_reg; -} - -/* Record the context of a call to the out of line helper code for the slow - path for a load or store, so that we can later generate the correct - helper code. */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, - int data_reg, int addr_reg, int mem_index, - tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->opc = opc; - label->datalo_reg = data_reg; - label->addrlo_reg = addr_reg; - label->mem_index = mem_index; - label->raddr = raddr; - label->label_ptr[0] = label_ptr; -} - -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGMemOp opc = lb->opc; - - reloc_pc14(lb->label_ptr[0], s->code_ptr); - - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0); - - /* If the address needed to be zero-extended, we'll have already - placed it in R4. The only remaining case is 64-bit guest. 
*/ - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg); - - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index); - tcg_out32(s, MFSPR | RT(TCG_REG_R6) | LR); - - tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); - - if (opc & MO_SIGN) { - uint32_t insn = qemu_exts_opc[opc & MO_SIZE]; - tcg_out32(s, insn | RA(lb->datalo_reg) | RS(TCG_REG_R3)); - } else { - tcg_out_mov(s, TCG_TYPE_I64, lb->datalo_reg, TCG_REG_R3); - } - - tcg_out_b(s, 0, lb->raddr); -} - -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGMemOp opc = lb->opc; - TCGMemOp s_bits = opc & MO_SIZE; - - reloc_pc14(lb->label_ptr[0], s->code_ptr); - - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, TCG_AREG0); - - /* If the address needed to be zero-extended, we'll have already - placed it in R4. The only remaining case is 64-bit guest. */ - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg); - - tcg_out_rld(s, RLDICL, TCG_REG_R5, lb->datalo_reg, - 0, 64 - (1 << (3 + s_bits))); - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R6, lb->mem_index); - tcg_out32(s, MFSPR | RT(TCG_REG_R7) | LR); - - tcg_out_call(s, qemu_st_helpers[opc]); - - tcg_out_b(s, 0, lb->raddr); -} -#endif /* SOFTMMU */ - -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOp opc, int mem_index) -{ - TCGReg rbase; - uint32_t insn; - TCGMemOp s_bits = opc & MO_SIZE; -#ifdef CONFIG_SOFTMMU - tcg_insn_unit *label_ptr; -#endif - -#ifdef CONFIG_SOFTMMU - addr_reg = tcg_out_tlb_read(s, s_bits, addr_reg, mem_index, true); - - /* Load a pointer into the current opcode w/conditional branch-link. */ - label_ptr = s->code_ptr; - tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); - - rbase = TCG_REG_R3; -#else /* !CONFIG_SOFTMMU */ - rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0; - if (TARGET_LONG_BITS == 32) { - tcg_out_ext32u(s, TCG_REG_R2, addr_reg); - addr_reg = TCG_REG_R2; - } -#endif - - insn = qemu_ldx_opc[opc]; - if (!HAVE_ISA_2_06 && insn == LDBRX) { - tcg_out32(s, ADDI | TAI(TCG_REG_R0, addr_reg, 4)); - tcg_out32(s, LWBRX | TAB(data_reg, rbase, addr_reg)); - tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0)); - tcg_out_rld(s, RLDIMI, data_reg, TCG_REG_R0, 32, 0); - } else if (insn) { - tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg)); - } else { - insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)]; - tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg)); - insn = qemu_exts_opc[s_bits]; - tcg_out32(s, insn | RA(data_reg) | RS(data_reg)); - } - -#ifdef CONFIG_SOFTMMU - add_qemu_ldst_label(s, true, opc, data_reg, addr_reg, mem_index, - s->code_ptr, label_ptr); -#endif -} - -static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOp opc, int mem_index) -{ - TCGReg rbase; - uint32_t insn; -#ifdef CONFIG_SOFTMMU - tcg_insn_unit *label_ptr; -#endif - -#ifdef CONFIG_SOFTMMU - addr_reg = tcg_out_tlb_read(s, opc & MO_SIZE, addr_reg, mem_index, false); - - /* Load a pointer into the current opcode w/conditional branch-link. */ - label_ptr = s->code_ptr; - tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); - - rbase = TCG_REG_R3; -#else /* !CONFIG_SOFTMMU */ - rbase = GUEST_BASE ? 
TCG_GUEST_BASE_REG : 0; - if (TARGET_LONG_BITS == 32) { - tcg_out_ext32u(s, TCG_REG_R2, addr_reg); - addr_reg = TCG_REG_R2; - } -#endif - - insn = qemu_stx_opc[opc]; - if (!HAVE_ISA_2_06 && insn == STDBRX) { - tcg_out32(s, STWBRX | SAB(data_reg, rbase, addr_reg)); - tcg_out32(s, ADDI | TAI(TCG_REG_R2, addr_reg, 4)); - tcg_out_shri64(s, TCG_REG_R0, data_reg, 32); - tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_R2)); - } else { - tcg_out32(s, insn | SAB(data_reg, rbase, addr_reg)); - } - -#ifdef CONFIG_SOFTMMU - add_qemu_ldst_label(s, false, opc, data_reg, addr_reg, mem_index, - s->code_ptr, label_ptr); -#endif -} - -#define FRAME_SIZE ((int) \ - ((8 /* back chain */ \ - + 8 /* CR */ \ - + 8 /* LR */ \ - + 8 /* compiler doubleword */ \ - + 8 /* link editor doubleword */ \ - + 8 /* TOC save area */ \ - + TCG_STATIC_CALL_ARGS_SIZE \ - + CPU_TEMP_BUF_NLONGS * sizeof(long) \ - + ARRAY_SIZE(tcg_target_callee_save_regs) * 8 \ - + 15) & ~15)) - -#define REG_SAVE_BOT (FRAME_SIZE - ARRAY_SIZE(tcg_target_callee_save_regs) * 8) - -static void tcg_target_qemu_prologue(TCGContext *s) -{ - int i; - - tcg_set_frame(s, TCG_REG_CALL_STACK, - REG_SAVE_BOT - CPU_TEMP_BUF_NLONGS * sizeof(long), - CPU_TEMP_BUF_NLONGS * sizeof(long)); - -#ifndef __APPLE__ - /* First emit adhoc function descriptor */ - tcg_out64(s, (uint64_t)s->code_ptr + 24); /* entry point */ - tcg_out64(s, 0); /* toc */ - tcg_out64(s, 0); /* environment pointer */ -#endif - - /* Prologue */ - tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR); - tcg_out32(s, STDU | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE)); - for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) { - tcg_out32(s, STD | SAI(tcg_target_callee_save_regs[i], 1, - REG_SAVE_BOT + i * 8)); - } - tcg_out32(s, STD | SAI(TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + 16)); - -#ifdef CONFIG_USE_GUEST_BASE - if (GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_GUEST_BASE_REG, GUEST_BASE); - tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); - } -#endif - - tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); - tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR); - tcg_out32(s, BCCTR | BO_ALWAYS); - - /* Epilogue */ - tb_ret_addr = s->code_ptr; - - for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) { - tcg_out32(s, LD | TAI(tcg_target_callee_save_regs[i], TCG_REG_R1, - REG_SAVE_BOT + i * 8)); - } - tcg_out32(s, LD | TAI(TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + 16)); - tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR); - tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE)); - tcg_out32(s, BCLR | BO_ALWAYS); -} - -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, - TCGReg arg1, intptr_t arg2) -{ - int opi, opx; - - if (type == TCG_TYPE_I32) { - opi = LWZ, opx = LWZX; - } else { - opi = LD, opx = LDX; - } - tcg_out_mem_long(s, opi, opx, ret, arg1, arg2); -} - -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - int opi, opx; - - if (type == TCG_TYPE_I32) { - opi = STW, opx = STWX; - } else { - opi = STD, opx = STDX; - } - tcg_out_mem_long(s, opi, opx, arg, arg1, arg2); -} - -static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, - int const_arg2, int cr, TCGType type) -{ - int imm; - uint32_t op; - - /* Simplify the comparisons below wrt CMPI. 
*/ - if (type == TCG_TYPE_I32) { - arg2 = (int32_t)arg2; - } - - switch (cond) { - case TCG_COND_EQ: - case TCG_COND_NE: - if (const_arg2) { - if ((int16_t) arg2 == arg2) { - op = CMPI; - imm = 1; - break; - } else if ((uint16_t) arg2 == arg2) { - op = CMPLI; - imm = 1; - break; - } - } - op = CMPL; - imm = 0; - break; - - case TCG_COND_LT: - case TCG_COND_GE: - case TCG_COND_LE: - case TCG_COND_GT: - if (const_arg2) { - if ((int16_t) arg2 == arg2) { - op = CMPI; - imm = 1; - break; - } - } - op = CMP; - imm = 0; - break; - - case TCG_COND_LTU: - case TCG_COND_GEU: - case TCG_COND_LEU: - case TCG_COND_GTU: - if (const_arg2) { - if ((uint16_t) arg2 == arg2) { - op = CMPLI; - imm = 1; - break; - } - } - op = CMPL; - imm = 0; - break; - - default: - tcg_abort(); - } - op |= BF(cr) | ((type == TCG_TYPE_I64) << 21); - - if (imm) { - tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff)); - } else { - if (const_arg2) { - tcg_out_movi(s, type, TCG_REG_R0, arg2); - arg2 = TCG_REG_R0; - } - tcg_out32(s, op | RA(arg1) | RB(arg2)); - } -} - -static void tcg_out_setcond_eq0(TCGContext *s, TCGType type, - TCGReg dst, TCGReg src) -{ - tcg_out32(s, (type == TCG_TYPE_I64 ? CNTLZD : CNTLZW) | RS(src) | RA(dst)); - tcg_out_shri64(s, dst, dst, type == TCG_TYPE_I64 ? 6 : 5); -} - -static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src) -{ - /* X != 0 implies X + -1 generates a carry. Extra addition - trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */ - if (dst != src) { - tcg_out32(s, ADDIC | TAI(dst, src, -1)); - tcg_out32(s, SUBFE | TAB(dst, dst, src)); - } else { - tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1)); - tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src)); - } -} - -static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2, - bool const_arg2) -{ - if (const_arg2) { - if ((uint32_t)arg2 == arg2) { - tcg_out_xori32(s, TCG_REG_R0, arg1, arg2); - } else { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2); - tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0)); - } - } else { - tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2)); - } - return TCG_REG_R0; -} - -static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, - TCGArg arg0, TCGArg arg1, TCGArg arg2, - int const_arg2) -{ - int crop, sh; - - /* Ignore high bits of a potential constant arg2. */ - if (type == TCG_TYPE_I32) { - arg2 = (uint32_t)arg2; - } - - /* Handle common and trivial cases before handling anything else. */ - if (arg2 == 0) { - switch (cond) { - case TCG_COND_EQ: - tcg_out_setcond_eq0(s, type, arg0, arg1); - return; - case TCG_COND_NE: - if (type == TCG_TYPE_I32) { - tcg_out_ext32u(s, TCG_REG_R0, arg1); - arg1 = TCG_REG_R0; - } - tcg_out_setcond_ne0(s, arg0, arg1); - return; - case TCG_COND_GE: - tcg_out32(s, NOR | SAB(arg1, arg0, arg1)); - arg1 = arg0; - /* FALLTHRU */ - case TCG_COND_LT: - /* Extract the sign bit. */ - tcg_out_rld(s, RLDICL, arg0, arg1, - type == TCG_TYPE_I64 ? 1 : 33, 63); - return; - default: - break; - } - } - - /* If we have ISEL, we can implement everything with 3 or 4 insns. - All other cases below are also at least 3 insns, so speed up the - code generator by not considering them and always using ISEL. */ - if (HAVE_ISEL) { - int isel, tab; - - tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); - - isel = tcg_to_isel[cond]; - - tcg_out_movi(s, type, arg0, 1); - if (isel & 1) { - /* arg0 = (bc ? 0 : 1) */ - tab = TAB(arg0, 0, arg0); - isel &= ~1; - } else { - /* arg0 = (bc ? 
1 : 0) */ - tcg_out_movi(s, type, TCG_REG_R0, 0); - tab = TAB(arg0, arg0, TCG_REG_R0); - } - tcg_out32(s, isel | tab); - return; - } - - switch (cond) { - case TCG_COND_EQ: - arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2); - tcg_out_setcond_eq0(s, type, arg0, arg1); - return; - - case TCG_COND_NE: - arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2); - /* Discard the high bits only once, rather than both inputs. */ - if (type == TCG_TYPE_I32) { - tcg_out_ext32u(s, TCG_REG_R0, arg1); - arg1 = TCG_REG_R0; - } - tcg_out_setcond_ne0(s, arg0, arg1); - return; - - case TCG_COND_GT: - case TCG_COND_GTU: - sh = 30; - crop = 0; - goto crtest; - - case TCG_COND_LT: - case TCG_COND_LTU: - sh = 29; - crop = 0; - goto crtest; - - case TCG_COND_GE: - case TCG_COND_GEU: - sh = 31; - crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT); - goto crtest; - - case TCG_COND_LE: - case TCG_COND_LEU: - sh = 31; - crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT); - crtest: - tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); - if (crop) { - tcg_out32(s, crop); - } - tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7)); - tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31); - break; - - default: - tcg_abort(); - } -} - -static void tcg_out_bc(TCGContext *s, int bc, int label_index) -{ - TCGLabel *l = &s->labels[label_index]; - - if (l->has_value) { - tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr)); - } else { - tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0); - tcg_out_bc_noaddr(s, bc); - } -} - -static void tcg_out_brcond(TCGContext *s, TCGCond cond, - TCGArg arg1, TCGArg arg2, int const_arg2, - int label_index, TCGType type) -{ - tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); - tcg_out_bc(s, tcg_to_bc[cond], label_index); -} - -static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, - TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1, - TCGArg v2, bool const_c2) -{ - /* If for some reason both inputs are zero, don't produce bad code. */ - if (v1 == 0 && v2 == 0) { - tcg_out_movi(s, type, dest, 0); - return; - } - - tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type); - - if (HAVE_ISEL) { - int isel = tcg_to_isel[cond]; - - /* Swap the V operands if the operation indicates inversion. */ - if (isel & 1) { - int t = v1; - v1 = v2; - v2 = t; - isel &= ~1; - } - /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */ - if (v2 == 0) { - tcg_out_movi(s, type, TCG_REG_R0, 0); - } - tcg_out32(s, isel | TAB(dest, v1, v2)); - } else { - if (dest == v2) { - cond = tcg_invert_cond(cond); - v2 = v1; - } else if (dest != v1) { - if (v1 == 0) { - tcg_out_movi(s, type, dest, 0); - } else { - tcg_out_mov(s, type, dest, v1); - } - } - /* Branch forward over one insn */ - tcg_out32(s, tcg_to_bc[cond] | 8); - if (v2 == 0) { - tcg_out_movi(s, type, dest, 0); - } else { - tcg_out_mov(s, type, dest, v2); - } - } -} - -void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) -{ - TCGContext s; - - s.code_buf = s.code_ptr = (tcg_insn_unit *)jmp_addr; - tcg_out_b(&s, 0, (tcg_insn_unit *)addr); - flush_icache_range(jmp_addr, jmp_addr + tcg_current_code_size(&s)); -} - -static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, - const int *const_args) -{ - TCGArg a0, a1, a2; - int c; - - switch (opc) { - case INDEX_op_exit_tb: - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R3, args[0]); - tcg_out_b(s, 0, tb_ret_addr); - break; - case INDEX_op_goto_tb: - if (s->tb_jmp_offset) { - /* Direct jump method. 
*/ - s->tb_jmp_offset[args[0]] = tcg_current_code_size(s); - s->code_ptr += 7; - } else { - /* Indirect jump method. */ - tcg_abort(); - } - s->tb_next_offset[args[0]] = tcg_current_code_size(s); - break; - case INDEX_op_br: - { - TCGLabel *l = &s->labels[args[0]]; - - if (l->has_value) { - tcg_out_b(s, 0, l->u.value_ptr); - } else { - tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, args[0], 0); - tcg_out_b_noaddr(s, B); - } - } - break; - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); - break; - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); - tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0])); - break; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]); - break; - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]); - break; - case INDEX_op_ld32s_i64: - tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]); - break; - case INDEX_op_ld_i64: - tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]); - break; - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]); - break; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]); - break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]); - break; - case INDEX_op_st_i64: - tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]); - break; - - case INDEX_op_add_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - do_addi_32: - tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2); - } else { - tcg_out32(s, ADD | TAB(a0, a1, a2)); - } - break; - case INDEX_op_sub_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[1]) { - if (const_args[2]) { - tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2); - } else { - tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); - } - } else if (const_args[2]) { - a2 = -a2; - goto do_addi_32; - } else { - tcg_out32(s, SUBF | TAB(a0, a2, a1)); - } - break; - - case INDEX_op_and_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi32(s, a0, a1, a2); - } else { - tcg_out32(s, AND | SAB(a1, a0, a2)); - } - break; - case INDEX_op_and_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi64(s, a0, a1, a2); - } else { - tcg_out32(s, AND | SAB(a1, a0, a2)); - } - break; - case INDEX_op_or_i64: - case INDEX_op_or_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_ori32(s, a0, a1, a2); - } else { - tcg_out32(s, OR | SAB(a1, a0, a2)); - } - break; - case INDEX_op_xor_i64: - case INDEX_op_xor_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_xori32(s, a0, a1, a2); - } else { - tcg_out32(s, XOR | SAB(a1, a0, a2)); - } - break; - case INDEX_op_andc_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi32(s, a0, a1, ~a2); - } else { - tcg_out32(s, ANDC | SAB(a1, a0, a2)); - } - break; - case INDEX_op_andc_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi64(s, a0, a1, ~a2); - } else { - tcg_out32(s, ANDC | SAB(a1, a0, a2)); - } - break; - case INDEX_op_orc_i32: - if (const_args[2]) { - 
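/* Illustrative note: or-with-complement of a constant is just an
   OR of the inverted immediate, so orc_i32 dst,src,0xffff0000
   below becomes tcg_out_ori32(s, dst, src, 0x0000ffff). */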
tcg_out_ori32(s, args[0], args[1], ~args[2]); - break; - } - /* FALLTHRU */ - case INDEX_op_orc_i64: - tcg_out32(s, ORC | SAB(args[1], args[0], args[2])); - break; - case INDEX_op_eqv_i32: - if (const_args[2]) { - tcg_out_xori32(s, args[0], args[1], ~args[2]); - break; - } - /* FALLTHRU */ - case INDEX_op_eqv_i64: - tcg_out32(s, EQV | SAB(args[1], args[0], args[2])); - break; - case INDEX_op_nand_i32: - case INDEX_op_nand_i64: - tcg_out32(s, NAND | SAB(args[1], args[0], args[2])); - break; - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - tcg_out32(s, NOR | SAB(args[1], args[0], args[2])); - break; - - case INDEX_op_mul_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out32(s, MULLI | TAI(a0, a1, a2)); - } else { - tcg_out32(s, MULLW | TAB(a0, a1, a2)); - } - break; - - case INDEX_op_div_i32: - tcg_out32(s, DIVW | TAB(args[0], args[1], args[2])); - break; - - case INDEX_op_divu_i32: - tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2])); - break; - - case INDEX_op_shl_i32: - if (const_args[2]) { - tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31 - args[2]); - } else { - tcg_out32(s, SLW | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_shr_i32: - if (const_args[2]) { - tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], args[2], 31); - } else { - tcg_out32(s, SRW | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_sar_i32: - if (const_args[2]) { - tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2])); - } else { - tcg_out32(s, SRAW | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_rotl_i32: - if (const_args[2]) { - tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31); - } else { - tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2]) - | MB(0) | ME(31)); - } - break; - case INDEX_op_rotr_i32: - if (const_args[2]) { - tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31); - } else { - tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32)); - tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0) - | MB(0) | ME(31)); - } - break; - - case INDEX_op_brcond_i32: - tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], - args[3], TCG_TYPE_I32); - break; - - case INDEX_op_brcond_i64: - tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], - args[3], TCG_TYPE_I64); - break; - - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - tcg_out32(s, NEG | RT(args[0]) | RA(args[1])); - break; - - case INDEX_op_not_i32: - case INDEX_op_not_i64: - tcg_out32(s, NOR | SAB(args[1], args[0], args[1])); - break; - - case INDEX_op_add_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - do_addi_64: - tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2); - } else { - tcg_out32(s, ADD | TAB(a0, a1, a2)); - } - break; - case INDEX_op_sub_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[1]) { - if (const_args[2]) { - tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2); - } else { - tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); - } - } else if (const_args[2]) { - a2 = -a2; - goto do_addi_64; - } else { - tcg_out32(s, SUBF | TAB(a0, a2, a1)); - } - break; - - case INDEX_op_shl_i64: - if (const_args[2]) { - tcg_out_shli64(s, args[0], args[1], args[2]); - } else { - tcg_out32(s, SLD | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_shr_i64: - if (const_args[2]) { - tcg_out_shri64(s, args[0], args[1], args[2]); - } else { - tcg_out32(s, SRD | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_sar_i64: - if (const_args[2]) { - int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 
1); - tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh); - } else { - tcg_out32(s, SRAD | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_rotl_i64: - if (const_args[2]) { - tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0); - } else { - tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0)); - } - break; - case INDEX_op_rotr_i64: - if (const_args[2]) { - tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0); - } else { - tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64)); - tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0)); - } - break; - - case INDEX_op_mul_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out32(s, MULLI | TAI(a0, a1, a2)); - } else { - tcg_out32(s, MULLD | TAB(a0, a1, a2)); - } - break; - case INDEX_op_div_i64: - tcg_out32(s, DIVD | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_divu_i64: - tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2])); - break; - - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]); - break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]); - break; - - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - c = EXTSB; - goto gen_ext; - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - c = EXTSH; - goto gen_ext; - case INDEX_op_ext32s_i64: - c = EXTSW; - goto gen_ext; - gen_ext: - tcg_out32(s, c | RS(args[1]) | RA(args[0])); - break; - - case INDEX_op_setcond_i32: - tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], - const_args[2]); - break; - case INDEX_op_setcond_i64: - tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2], - const_args[2]); - break; - - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - a0 = args[0], a1 = args[1]; - /* a1 = abcd */ - if (a0 != a1) { - /* a0 = (a1 r<< 24) & 0xff # 000c */ - tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); - /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */ - tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23); - } else { - /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */ - tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23); - /* a0 = (a1 r<< 24) & 0xff # 000c */ - tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); - /* a0 = a0 | r0 # 00dc */ - tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0)); - } - break; - - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - /* Stolen from gcc's builtin_bswap32 */ - a1 = args[1]; - a0 = args[0] == a1 ? 
TCG_REG_R0 : args[0]; - - /* a1 = args[1] # abcd */ - /* a0 = rotate_left (a1, 8) # bcda */ - tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); - /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */ - tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); - /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */ - tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); - - if (a0 == TCG_REG_R0) { - tcg_out_mov(s, TCG_TYPE_I64, args[0], a0); - } - break; - - case INDEX_op_bswap64_i64: - a0 = args[0], a1 = args[1], a2 = TCG_REG_R0; - if (a0 == a1) { - a0 = TCG_REG_R0; - a2 = a1; - } - - /* a1 = # abcd efgh */ - /* a0 = rl32(a1, 8) # 0000 fghe */ - tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); - /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */ - tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); - /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */ - tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); - - /* a0 = rl64(a0, 32) # hgfe 0000 */ - /* a2 = rl64(a1, 32) # efgh abcd */ - tcg_out_rld(s, RLDICL, a0, a0, 32, 0); - tcg_out_rld(s, RLDICL, a2, a1, 32, 0); - - /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */ - tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31); - /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */ - tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7); - /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */ - tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23); - - if (a0 == 0) { - tcg_out_mov(s, TCG_TYPE_I64, args[0], a0); - } - break; - - case INDEX_op_deposit_i32: - if (const_args[2]) { - uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3]; - tcg_out_andi32(s, args[0], args[0], ~mask); - } else { - tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3], - 32 - args[3] - args[4], 31 - args[3]); - } - break; - case INDEX_op_deposit_i64: - if (const_args[2]) { - uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3]; - tcg_out_andi64(s, args[0], args[0], ~mask); - } else { - tcg_out_rld(s, RLDIMI, args[0], args[2], args[3], - 64 - args[3] - args[4]); - } - break; - - case INDEX_op_movcond_i32: - tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2], - args[3], args[4], const_args[2]); - break; - case INDEX_op_movcond_i64: - tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2], - args[3], args[4], const_args[2]); - break; - - case INDEX_op_add2_i64: - /* Note that the CA bit is defined based on the word size of the - environment. So in 64-bit mode it's always carry-out of bit 63. - The fallback code using deposit works just as well for 32-bit. */ - a0 = args[0], a1 = args[1]; - if (a0 == args[3] || (!const_args[5] && a0 == args[5])) { - a0 = TCG_REG_R0; - } - if (const_args[4]) { - tcg_out32(s, ADDIC | TAI(a0, args[2], args[4])); - } else { - tcg_out32(s, ADDC | TAB(a0, args[2], args[4])); - } - if (const_args[5]) { - tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3])); - } else { - tcg_out32(s, ADDE | TAB(a1, args[3], args[5])); - } - if (a0 != args[0]) { - tcg_out_mov(s, TCG_TYPE_I64, args[0], a0); - } - break; - - case INDEX_op_sub2_i64: - a0 = args[0], a1 = args[1]; - if (a0 == args[5] || (!const_args[4] && a0 == args[4])) { - a0 = TCG_REG_R0; - } - if (const_args[2]) { - tcg_out32(s, SUBFIC | TAI(a0, args[3], args[2])); - } else { - tcg_out32(s, SUBFC | TAB(a0, args[3], args[2])); - } - if (const_args[4]) { - tcg_out32(s, (args[4] ? 
SUBFME : SUBFZE) | RT(a1) | RA(args[5])); - } else { - tcg_out32(s, SUBFE | TAB(a1, args[5], args[4])); - } - if (a0 != args[0]) { - tcg_out_mov(s, TCG_TYPE_I64, args[0], a0); - } - break; - - case INDEX_op_muluh_i64: - tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_mulsh_i64: - tcg_out32(s, MULHD | TAB(args[0], args[1], args[2])); - break; - - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ - case INDEX_op_movi_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - default: - tcg_abort(); - } -} - -static const TCGTargetOpDef ppc_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, - - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, - { INDEX_op_st8_i32, { "r", "r" } }, - { INDEX_op_st8_i64, { "r", "r" } }, - { INDEX_op_st16_i32, { "r", "r" } }, - { INDEX_op_st16_i64, { "r", "r" } }, - { INDEX_op_st_i32, { "r", "r" } }, - { INDEX_op_st_i64, { "r", "r" } }, - { INDEX_op_st32_i64, { "r", "r" } }, - - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - - { INDEX_op_add_i32, { "r", "r", "ri" } }, - { INDEX_op_mul_i32, { "r", "r", "rI" } }, - { INDEX_op_div_i32, { "r", "r", "r" } }, - { INDEX_op_divu_i32, { "r", "r", "r" } }, - { INDEX_op_sub_i32, { "r", "rI", "ri" } }, - { INDEX_op_and_i32, { "r", "r", "ri" } }, - { INDEX_op_or_i32, { "r", "r", "ri" } }, - { INDEX_op_xor_i32, { "r", "r", "ri" } }, - { INDEX_op_andc_i32, { "r", "r", "ri" } }, - { INDEX_op_orc_i32, { "r", "r", "ri" } }, - { INDEX_op_eqv_i32, { "r", "r", "ri" } }, - { INDEX_op_nand_i32, { "r", "r", "r" } }, - { INDEX_op_nor_i32, { "r", "r", "r" } }, - - { INDEX_op_shl_i32, { "r", "r", "ri" } }, - { INDEX_op_shr_i32, { "r", "r", "ri" } }, - { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "ri" } }, - - { INDEX_op_brcond_i32, { "r", "ri" } }, - { INDEX_op_brcond_i64, { "r", "ri" } }, - - { INDEX_op_neg_i32, { "r", "r" } }, - { INDEX_op_not_i32, { "r", "r" } }, - - { INDEX_op_add_i64, { "r", "r", "rT" } }, - { INDEX_op_sub_i64, { "r", "rI", "rT" } }, - { INDEX_op_and_i64, { "r", "r", "ri" } }, - { INDEX_op_or_i64, { "r", "r", "rU" } }, - { INDEX_op_xor_i64, { "r", "r", "rU" } }, - { INDEX_op_andc_i64, { "r", "r", "ri" } }, - { INDEX_op_orc_i64, { "r", "r", "r" } }, - { INDEX_op_eqv_i64, { "r", "r", "r" } }, - { INDEX_op_nand_i64, { "r", "r", "r" } }, - { INDEX_op_nor_i64, { "r", "r", "r" } }, - - { INDEX_op_shl_i64, { "r", "r", "ri" } }, - { INDEX_op_shr_i64, { "r", "r", "ri" } }, - { INDEX_op_sar_i64, { "r", "r", "ri" } }, - { INDEX_op_rotl_i64, { "r", "r", "ri" } }, - { INDEX_op_rotr_i64, { "r", "r", "ri" } }, - - { INDEX_op_mul_i64, { "r", "r", "rI" } }, - { INDEX_op_div_i64, { "r", "r", "r" } }, - { INDEX_op_divu_i64, { "r", "r", "r" } }, - - { INDEX_op_neg_i64, { "r", "r" } }, - { INDEX_op_not_i64, { "r", "r" } }, - - { INDEX_op_qemu_ld_i32, { "r", "L" } }, - { INDEX_op_qemu_ld_i64, { "r", "L" } }, - { INDEX_op_qemu_st_i32, { "S", "S" } }, - { INDEX_op_qemu_st_i64, { "S", "S" } }, - - { INDEX_op_ext8s_i32, 
{ "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext8s_i64, { "r", "r" } }, - { INDEX_op_ext16s_i64, { "r", "r" } }, - { INDEX_op_ext32s_i64, { "r", "r" } }, - - { INDEX_op_setcond_i32, { "r", "r", "ri" } }, - { INDEX_op_setcond_i64, { "r", "r", "ri" } }, - { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } }, - { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } }, - - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap16_i64, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - { INDEX_op_bswap32_i64, { "r", "r" } }, - { INDEX_op_bswap64_i64, { "r", "r" } }, - - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, - - { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } }, - { INDEX_op_sub2_i64, { "r", "r", "rI", "r", "rZM", "r" } }, - { INDEX_op_mulsh_i64, { "r", "r", "r" } }, - { INDEX_op_muluh_i64, { "r", "r", "r" } }, - - { -1 }, -}; - -static void tcg_target_init(TCGContext *s) -{ - unsigned long hwcap = qemu_getauxval(AT_HWCAP); - if (hwcap & PPC_FEATURE_ARCH_2_06) { - have_isa_2_06 = true; - } - - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); - tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); - tcg_regset_set32(tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_R0) | - (1 << TCG_REG_R2) | - (1 << TCG_REG_R3) | - (1 << TCG_REG_R4) | - (1 << TCG_REG_R5) | - (1 << TCG_REG_R6) | - (1 << TCG_REG_R7) | - (1 << TCG_REG_R8) | - (1 << TCG_REG_R9) | - (1 << TCG_REG_R10) | - (1 << TCG_REG_R11) | - (1 << TCG_REG_R12)); - - tcg_regset_clear(s->reserved_regs); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* mem temp */ -#ifdef __APPLE__ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R11); /* ??? */ -#endif - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ - - tcg_add_target_add_op_defs(ppc_op_defs); -} - -typedef struct { - DebugFrameCIE cie; - DebugFrameFDEHeader fde; - uint8_t fde_def_cfa[4]; - uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3]; -} DebugFrame; - -/* We're expecting a 2 byte uleb128 encoded value. */ -QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); - -#define ELF_HOST_MACHINE EM_PPC64 - -static DebugFrame debug_frame = { - .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ - .cie.id = -1, - .cie.version = 1, - .cie.code_align = 1, - .cie.data_align = 0x78, /* sleb128 -8 */ - .cie.return_column = 65, - - /* Total FDE size does not include the "len" member. */ - .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset), - - .fde_def_cfa = { - 12, 1, /* DW_CFA_def_cfa r1, ... */ - (FRAME_SIZE & 0x7f) | 0x80, /* ... 
uleb128 FRAME_SIZE */ - (FRAME_SIZE >> 7) - }, - .fde_reg_ofs = { - 0x11, 65, 0x7e, /* DW_CFA_offset_extended_sf, lr, 16 */ - } -}; - -void tcg_register_jit(void *buf, size_t buf_size) -{ - uint8_t *p = &debug_frame.fde_reg_ofs[3]; - int i; - - for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) { - p[0] = 0x80 + tcg_target_callee_save_regs[i]; - p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * 8)) / 8; - } - - debug_frame.fde.func_start = (tcg_target_long) buf; - debug_frame.fde.func_len = buf_size; - - tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); -} diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h deleted file mode 100644 index f2360c869a..0000000000 --- a/tcg/ppc64/tcg-target.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#ifndef TCG_TARGET_PPC64 -#define TCG_TARGET_PPC64 1 - -#define TCG_TARGET_NB_REGS 32 -#define TCG_TARGET_INSN_UNIT_SIZE 4 - -typedef enum { - TCG_REG_R0 = 0, - TCG_REG_R1, - TCG_REG_R2, - TCG_REG_R3, - TCG_REG_R4, - TCG_REG_R5, - TCG_REG_R6, - TCG_REG_R7, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10, - TCG_REG_R11, - TCG_REG_R12, - TCG_REG_R13, - TCG_REG_R14, - TCG_REG_R15, - TCG_REG_R16, - TCG_REG_R17, - TCG_REG_R18, - TCG_REG_R19, - TCG_REG_R20, - TCG_REG_R21, - TCG_REG_R22, - TCG_REG_R23, - TCG_REG_R24, - TCG_REG_R25, - TCG_REG_R26, - TCG_REG_R27, - TCG_REG_R28, - TCG_REG_R29, - TCG_REG_R30, - TCG_REG_R31 -} TCGReg; - -/* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_R1 -#define TCG_TARGET_STACK_ALIGN 16 -#define TCG_TARGET_CALL_STACK_OFFSET 48 - -/* optional instructions automatically implemented */ -#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */ -#define TCG_TARGET_HAS_ext16u_i32 0 -#define TCG_TARGET_HAS_ext8u_i64 0 -#define TCG_TARGET_HAS_ext16u_i64 0 -#define TCG_TARGET_HAS_ext32u_i64 0 - -/* optional instructions */ -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 0 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_neg_i32 1 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_orc_i32 1 -#define TCG_TARGET_HAS_eqv_i32 1 -#define TCG_TARGET_HAS_nand_i32 1 -#define TCG_TARGET_HAS_nor_i32 1 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_movcond_i32 1 -#define TCG_TARGET_HAS_add2_i32 0 -#define TCG_TARGET_HAS_sub2_i32 0 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_trunc_shr_i32 0 - -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 0 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_neg_i64 1 -#define TCG_TARGET_HAS_andc_i64 1 -#define TCG_TARGET_HAS_orc_i64 1 -#define TCG_TARGET_HAS_eqv_i64 1 -#define TCG_TARGET_HAS_nand_i64 1 -#define TCG_TARGET_HAS_nor_i64 1 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_movcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 1 -#define TCG_TARGET_HAS_mulsh_i64 1 - -#define TCG_AREG0 TCG_REG_R27 - -#define TCG_TARGET_EXTEND_ARGS 1 - -#endif @@ -37,7 +37,6 @@ #endif #include "qemu-common.h" -#include "qemu/cache-utils.h" #include "qemu/host-utils.h" #include "qemu/timer.h" diff --git a/util/Makefile.objs b/util/Makefile.objs index df83b629a0..6b3c83b0eb 100644 --- a/util/Makefile.objs +++ b/util/Makefile.objs @@ -1,7 +1,7 @@ util-obj-y = osdep.o cutils.o unicode.o qemu-timer-common.o util-obj-$(CONFIG_WIN32) += oslib-win32.o qemu-thread-win32.o event_notifier-win32.o util-obj-$(CONFIG_POSIX) += oslib-posix.o qemu-thread-posix.o event_notifier-posix.o qemu-openpty.o -util-obj-y += envlist.o path.o host-utils.o cache-utils.o module.o +util-obj-y += envlist.o path.o host-utils.o module.o util-obj-y += bitmap.o bitops.o hbitmap.o util-obj-y += fifo8.o 
diff --git a/util/cache-utils.c b/util/cache-utils.c
deleted file mode 100644
index 047003008b..0000000000
--- a/util/cache-utils.c
+++ /dev/null
@@ -1,86 +0,0 @@
-#include "qemu-common.h"
-#include "qemu/cache-utils.h"
-
-#if defined(_ARCH_PPC)
-struct qemu_cache_conf qemu_cache_conf = {
-    .dcache_bsize = 16,
-    .icache_bsize = 16
-};
-
-#if defined _AIX
-#include <sys/systemcfg.h>
-
-void qemu_cache_utils_init(void)
-{
-    qemu_cache_conf.icache_bsize = _system_configuration.icache_line;
-    qemu_cache_conf.dcache_bsize = _system_configuration.dcache_line;
-}
-
-#elif defined __linux__
-#include "qemu/osdep.h"
-#include "elf.h"
-
-void qemu_cache_utils_init(void)
-{
-    unsigned long dsize = qemu_getauxval(AT_DCACHEBSIZE);
-    unsigned long isize = qemu_getauxval(AT_ICACHEBSIZE);
-
-    if (dsize == 0 || isize == 0) {
-        if (dsize == 0) {
-            fprintf(stderr, "getauxval AT_DCACHEBSIZE failed\n");
-        }
-        if (isize == 0) {
-            fprintf(stderr, "getauxval AT_ICACHEBSIZE failed\n");
-        }
-        exit(1);
-
-    }
-    qemu_cache_conf.dcache_bsize = dsize;
-    qemu_cache_conf.icache_bsize = isize;
-}
-
-#elif defined __APPLE__
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-
-void qemu_cache_utils_init(void)
-{
-    size_t len;
-    unsigned cacheline;
-    int name[2] = { CTL_HW, HW_CACHELINE };
-
-    len = sizeof(cacheline);
-    if (sysctl(name, 2, &cacheline, &len, NULL, 0)) {
-        perror("sysctl CTL_HW HW_CACHELINE failed");
-    } else {
-        qemu_cache_conf.dcache_bsize = cacheline;
-        qemu_cache_conf.icache_bsize = cacheline;
-    }
-}
-
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-
-void qemu_cache_utils_init(void)
-{
-    size_t len = 4;
-    unsigned cacheline;
-
-    if (sysctlbyname ("machdep.cacheline_size", &cacheline, &len, NULL, 0)) {
-        fprintf(stderr, "sysctlbyname machdep.cacheline_size failed: %s\n",
-                strerror(errno));
-        exit(1);
-    }
-
-    qemu_cache_conf.dcache_bsize = cacheline;
-    qemu_cache_conf.icache_bsize = cacheline;
-}
-#endif
-
-#endif /* _ARCH_PPC */
diff --git a/util/getauxval.c b/util/getauxval.c
index 476c883b32..25f48e5456 100644
--- a/util/getauxval.c
+++ b/util/getauxval.c
@@ -48,24 +48,51 @@ typedef struct {
 
 static const ElfW_auxv_t *auxv;
 
-void qemu_init_auxval(char **envp)
+static const ElfW_auxv_t *qemu_init_auxval(void)
 {
-    /* The auxiliary vector is located just beyond the initial environment. */
-    while (*envp++ != NULL) {
-        continue;
+    ElfW_auxv_t *a;
+    ssize_t size = 512, r, ofs;
+    int fd;
+
+    /* Allocate some initial storage.  Make sure the first entry is set
+       to end-of-list, so that we've got a valid list in case of error.  */
+    auxv = a = g_malloc(size);
+    a[0].a_type = 0;
+    a[0].a_val = 0;
+
+    fd = open("/proc/self/auxv", O_RDONLY);
+    if (fd < 0) {
+        return a;
+    }
+
+    /* Read the first SIZE bytes.  Hopefully, this covers everything.  */
+    r = read(fd, a, size);
+
+    if (r == size) {
+        /* Continue to expand until we do get a partial read.  */
+        do {
+            ofs = size;
+            size *= 2;
+            auxv = a = g_realloc(a, size);
+            r = read(fd, (char *)a + ofs, ofs);
+        } while (r == ofs);
     }
-    auxv = (const ElfW_auxv_t *)envp;
+
+    close(fd);
+    return a;
 }
 
 unsigned long qemu_getauxval(unsigned long type)
 {
-    /* If we were able to find the auxiliary vector, use it.  */
-    if (auxv) {
-        const ElfW_auxv_t *a;
-        for (a = auxv; a->a_type != 0; a++) {
-            if (a->a_type == type) {
-                return a->a_val;
-            }
+    const ElfW_auxv_t *a = auxv;
+
+    if (unlikely(a == NULL)) {
+        a = qemu_init_auxval();
+    }
+
+    for (; a->a_type != 0; a++) {
+        if (a->a_type == type) {
+            return a->a_val;
         }
     }
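With cache-utils.c gone, the dcache/icache block probing lives in the merged tcg/ppc backend and goes through qemu_getauxval() on demand. To see what the kernel actually publishes, the same entries can be queried with glibc's getauxval(3); the fallback constants below are the standard Linux AT_* values, and both entries are normally nonzero only on powerpc hosts:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_DCACHEBSIZE
#define AT_DCACHEBSIZE 19
#endif
#ifndef AT_ICACHEBSIZE
#define AT_ICACHEBSIZE 20
#endif

int main(void)
{
    /* 0 means the kernel did not supply the entry, mirroring the
       error case the deleted __linux__ branch checked for. */
    printf("dcache block: %lu\n", getauxval(AT_DCACHEBSIZE));
    printf("icache block: %lu\n", getauxval(AT_ICACHEBSIZE));
    return 0;
}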
@@ -82,7 +82,6 @@ int main(int argc, char **argv)
 #include "qemu/timer.h"
 #include "sysemu/char.h"
 #include "qemu/bitmap.h"
-#include "qemu/cache-utils.h"
 #include "sysemu/blockdev.h"
 #include "hw/block/block.h"
 #include "migration/block.h"
@@ -2974,9 +2973,6 @@ int main(int argc, char **argv, char **envp)
 
     rtc_clock = QEMU_CLOCK_HOST;
 
-    qemu_init_auxval(envp);
-    qemu_cache_utils_init();
-
     QLIST_INIT (&vm_change_state_head);
 
     os_setup_early_signal_handling();
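The vl.c hunks are the payoff of the getauxval.c rework: because the first qemu_getauxval() call now initializes the vector itself, main() no longer has to forward envp or call any setup function. The shape of that lazy-init pattern in miniature, with illustrative names rather than QEMU's:

#include <stddef.h>
#include <stdio.h>

typedef struct { unsigned long a_type, a_val; } AuxEntry;

static const AuxEntry *auxv_cache;

static const AuxEntry *init_auxval(void)
{
    /* Stand-in for the /proc/self/auxv read above; a lone end-of-list
       entry keeps later lookups well defined even if parsing fails. */
    static const AuxEntry sentinel = { 0, 0 };
    auxv_cache = &sentinel;
    return auxv_cache;
}

static unsigned long lookup_auxval(unsigned long type)
{
    const AuxEntry *a = auxv_cache;

    if (a == NULL) {
        a = init_auxval();              /* first caller pays the cost */
    }
    for (; a->a_type != 0; a++) {
        if (a->a_type == type) {
            return a->a_val;
        }
    }
    return 0;
}

int main(void)
{
    /* No explicit init call needed before the first lookup. */
    printf("%lu\n", lookup_auxval(6));  /* 6 == AT_PAGESZ */
    return 0;
}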