author     Peter Maydell <peter.maydell@linaro.org>    2019-06-11 16:39:46 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2019-06-13 15:14:04 +0100
commit     3993d0407dff7233e42f2251db971e126a0497e9 (patch)
tree       343a00b2a1d2f857a3f20290deb456af0aef367d /target/arm/translate-vfp.inc.c
parent     fa288de272c5c8a66d5eb683b123706a52bc7ad6 (diff)
target/arm: Remove VLDR/VSTR/VLDM/VSTM use of cpu_F0s and cpu_F0d
Expand out the sequences in the new decoder VLDR/VSTR/VLDM/VSTM trans functions which perform the memory accesses by going via the TCG globals cpu_F0s and cpu_F0d, to use local TCG temps instead.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
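In other words, each of these trans functions used to bounce the value through the cpu_F0s/cpu_F0d TCG globals via gen_vfp_ld()/gen_vfp_st() plus gen_mov_vreg_F0()/gen_mov_F0_vreg(); after this patch it allocates a local TCG temp, performs the gen_aa32_* memory access on that temp, and copies it to or from the VFP register with neon_load_reg32()/neon_store_reg32() (or the 64-bit equivalents). A condensed before/after sketch of the single-precision VLDR/VSTR path, pieced together from the hunks below rather than quoted verbatim:

    /* Before: the loaded/stored value travels via the cpu_F0s global */
    if (a->l) {
        gen_vfp_ld(s, false, addr);       /* memory -> cpu_F0s */
        gen_mov_vreg_F0(false, a->vd);    /* cpu_F0s -> Vd */
    } else {
        gen_mov_F0_vreg(false, a->vd);    /* Vd -> cpu_F0s */
        gen_vfp_st(s, false, addr);       /* cpu_F0s -> memory */
    }

    /* After: the value travels via a function-local TCG temp */
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));  /* memory -> tmp */
        neon_store_reg32(tmp, a->vd);                    /* tmp -> Vd */
    } else {
        neon_load_reg32(tmp, a->vd);                     /* Vd -> tmp */
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));   /* tmp -> memory */
    }
    tcg_temp_free_i32(tmp);

The double-precision and VLDM/VSTM hunks follow the same shape, using tcg_temp_new_i64()/tcg_temp_free_i64(), gen_aa32_ld64()/gen_aa32_st64() and neon_load_reg64()/neon_store_reg64().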
Diffstat (limited to 'target/arm/translate-vfp.inc.c')
-rw-r--r--  target/arm/translate-vfp.inc.c | 46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 32a1805e58..9729946d73 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -857,7 +857,7 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
 static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
 {
     uint32_t offset;
-    TCGv_i32 addr;
+    TCGv_i32 addr, tmp;
 
     if (!vfp_access_check(s)) {
         return true;
@@ -876,13 +876,15 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
         addr = load_reg(s, a->rn);
     }
     tcg_gen_addi_i32(addr, addr, offset);
+    tmp = tcg_temp_new_i32();
     if (a->l) {
-        gen_vfp_ld(s, false, addr);
-        gen_mov_vreg_F0(false, a->vd);
+        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+        neon_store_reg32(tmp, a->vd);
     } else {
-        gen_mov_F0_vreg(false, a->vd);
-        gen_vfp_st(s, false, addr);
+        neon_load_reg32(tmp, a->vd);
+        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
     }
+    tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(addr);
 
     return true;
@@ -892,6 +894,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
 {
     uint32_t offset;
     TCGv_i32 addr;
+    TCGv_i64 tmp;
 
     /* UNDEF accesses to D16-D31 if they don't exist */
     if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
@@ -915,13 +918,15 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
         addr = load_reg(s, a->rn);
     }
     tcg_gen_addi_i32(addr, addr, offset);
+    tmp = tcg_temp_new_i64();
     if (a->l) {
-        gen_vfp_ld(s, true, addr);
-        gen_mov_vreg_F0(true, a->vd);
+        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+        neon_store_reg64(tmp, a->vd);
     } else {
-        gen_mov_F0_vreg(true, a->vd);
-        gen_vfp_st(s, true, addr);
+        neon_load_reg64(tmp, a->vd);
+        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
     }
+    tcg_temp_free_i64(tmp);
     tcg_temp_free_i32(addr);
 
     return true;
@@ -930,7 +935,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
 static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
 {
     uint32_t offset;
-    TCGv_i32 addr;
+    TCGv_i32 addr, tmp;
     int i, n;
 
     n = a->imm;
@@ -976,18 +981,20 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
     }
 
     offset = 4;
+    tmp = tcg_temp_new_i32();
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_vfp_ld(s, false, addr);
-            gen_mov_vreg_F0(false, a->vd + i);
+            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+            neon_store_reg32(tmp, a->vd + i);
         } else {
             /* store */
-            gen_mov_F0_vreg(false, a->vd + i);
-            gen_vfp_st(s, false, addr);
+            neon_load_reg32(tmp, a->vd + i);
+            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
         }
         tcg_gen_addi_i32(addr, addr, offset);
     }
+    tcg_temp_free_i32(tmp);
     if (a->w) {
         /* writeback */
         if (a->p) {
@@ -1006,6 +1013,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
 {
     uint32_t offset;
     TCGv_i32 addr;
+    TCGv_i64 tmp;
     int i, n;
 
     n = a->imm >> 1;
@@ -1056,18 +1064,20 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
     }
 
     offset = 8;
+    tmp = tcg_temp_new_i64();
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_vfp_ld(s, true, addr);
-            gen_mov_vreg_F0(true, a->vd + i);
+            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+            neon_store_reg64(tmp, a->vd + i);
         } else {
             /* store */
-            gen_mov_F0_vreg(true, a->vd + i);
-            gen_vfp_st(s, true, addr);
+            neon_load_reg64(tmp, a->vd + i);
+            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
         tcg_gen_addi_i32(addr, addr, offset);
     }
+    tcg_temp_free_i64(tmp);
     if (a->w) {
         /* writeback */
         if (a->p) {