| field | value | date |
|---|---|---|
| author | Frank Chang <frank.chang@sifive.com> | 2021-12-10 15:56:06 +0800 |
| committer | Alistair Francis <alistair.francis@wdc.com> | 2021-12-20 14:51:36 +1000 |
| commit | 79556fb6fa067922fb11d2a1209852900109c7ae (patch) | |
| tree | d8b0f6f44234279b65229ad26ef517d68d83bb57 /target/riscv/vector_helper.c | |
| parent | d9b7609a1fb237dd05fac4cfe5163429115c9c6d (diff) | |
target/riscv: rvv-1.0: stride load and store instructions
Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20211210075704.23951-21-frank.chang@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Diffstat (limited to 'target/riscv/vector_helper.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | target/riscv/vector_helper.c | 199 |

1 file changed, 69 insertions, 130 deletions
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 78fae78284..9141292994 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -178,38 +178,36 @@ static inline int vext_elem_mask(void *v0, int index)
 typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
                                uint32_t idx, void *vd, uintptr_t retaddr);

-#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF) \
+#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
 static void NAME(CPURISCVState *env, abi_ptr addr, \
                  uint32_t idx, void *vd, uintptr_t retaddr)\
 { \
-    MTYPE data; \
     ETYPE *cur = ((ETYPE *)vd + H(idx)); \
-    data = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
-    *cur = data; \
+    *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
 } \

-GEN_VEXT_LD_ELEM(ldb_b, int8_t, int8_t, H1, ldsb)
-GEN_VEXT_LD_ELEM(ldb_h, int8_t, int16_t, H2, ldsb)
-GEN_VEXT_LD_ELEM(ldb_w, int8_t, int32_t, H4, ldsb)
-GEN_VEXT_LD_ELEM(ldb_d, int8_t, int64_t, H8, ldsb)
-GEN_VEXT_LD_ELEM(ldh_h, int16_t, int16_t, H2, ldsw)
-GEN_VEXT_LD_ELEM(ldh_w, int16_t, int32_t, H4, ldsw)
-GEN_VEXT_LD_ELEM(ldh_d, int16_t, int64_t, H8, ldsw)
-GEN_VEXT_LD_ELEM(ldw_w, int32_t, int32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(ldw_d, int32_t, int64_t, H8, ldl)
-GEN_VEXT_LD_ELEM(lde_b, int8_t, int8_t, H1, ldsb)
-GEN_VEXT_LD_ELEM(lde_h, int16_t, int16_t, H2, ldsw)
-GEN_VEXT_LD_ELEM(lde_w, int32_t, int32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(lde_d, int64_t, int64_t, H8, ldq)
-GEN_VEXT_LD_ELEM(ldbu_b, uint8_t, uint8_t, H1, ldub)
-GEN_VEXT_LD_ELEM(ldbu_h, uint8_t, uint16_t, H2, ldub)
-GEN_VEXT_LD_ELEM(ldbu_w, uint8_t, uint32_t, H4, ldub)
-GEN_VEXT_LD_ELEM(ldbu_d, uint8_t, uint64_t, H8, ldub)
-GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw)
-GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw)
-GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw)
-GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl)
+GEN_VEXT_LD_ELEM(ldb_b, int8_t, H1, ldsb)
+GEN_VEXT_LD_ELEM(ldb_h, int16_t, H2, ldsb)
+GEN_VEXT_LD_ELEM(ldb_w, int32_t, H4, ldsb)
+GEN_VEXT_LD_ELEM(ldb_d, int64_t, H8, ldsb)
+GEN_VEXT_LD_ELEM(ldh_h, int16_t, H2, ldsw)
+GEN_VEXT_LD_ELEM(ldh_w, int32_t, H4, ldsw)
+GEN_VEXT_LD_ELEM(ldh_d, int64_t, H8, ldsw)
+GEN_VEXT_LD_ELEM(ldw_w, int32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(ldw_d, int64_t, H8, ldl)
+GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb)
+GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw)
+GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq)
+GEN_VEXT_LD_ELEM(ldbu_b, uint8_t, H1, ldub)
+GEN_VEXT_LD_ELEM(ldbu_h, uint16_t, H2, ldub)
+GEN_VEXT_LD_ELEM(ldbu_w, uint32_t, H4, ldub)
+GEN_VEXT_LD_ELEM(ldbu_d, uint64_t, H8, ldub)
+GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, H2, lduw)
+GEN_VEXT_LD_ELEM(ldhu_w, uint32_t, H4, lduw)
+GEN_VEXT_LD_ELEM(ldhu_d, uint64_t, H8, lduw)
+GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(ldwu_d, uint64_t, H8, ldl)

 #define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
 static void NAME(CPURISCVState *env, abi_ptr addr, \
@@ -241,8 +239,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
                  target_ulong stride, CPURISCVState *env,
                  uint32_t desc, uint32_t vm,
                  vext_ldst_elem_fn *ldst_elem,
-                 uint32_t esz, uint32_t msz, uintptr_t ra,
-                 MMUAccessType access_type)
+                 uint32_t esz, uintptr_t ra, MMUAccessType access_type)
 {
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
@@ -253,7 +250,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
-        probe_pages(env, base + stride * i, nf * msz, ra, access_type);
+        probe_pages(env, base + stride * i, nf * esz, ra, access_type);
     }
     /* do real access */
     for (i = 0; i < env->vl; i++) {
@@ -262,71 +259,42 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
             continue;
         }
         while (k < nf) {
-            target_ulong addr = base + stride * i + k * msz;
+            target_ulong addr = base + stride * i + k * esz;
             ldst_elem(env, addr, i + k * vlmax, vd, ra);
             k++;
         }
     }
 }

-#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN) \
+#define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \
 void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
                   target_ulong stride, CPURISCVState *env, \
                   uint32_t desc) \
 { \
     uint32_t vm = vext_vm(desc); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \
-                     sizeof(ETYPE), sizeof(MTYPE), \
-                     GETPC(), MMU_DATA_LOAD); \
-}
-
-GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b)
-GEN_VEXT_LD_STRIDE(vlsb_v_h, int8_t, int16_t, ldb_h)
-GEN_VEXT_LD_STRIDE(vlsb_v_w, int8_t, int32_t, ldb_w)
-GEN_VEXT_LD_STRIDE(vlsb_v_d, int8_t, int64_t, ldb_d)
-GEN_VEXT_LD_STRIDE(vlsh_v_h, int16_t, int16_t, ldh_h)
-GEN_VEXT_LD_STRIDE(vlsh_v_w, int16_t, int32_t, ldh_w)
-GEN_VEXT_LD_STRIDE(vlsh_v_d, int16_t, int64_t, ldh_d)
-GEN_VEXT_LD_STRIDE(vlsw_v_w, int32_t, int32_t, ldw_w)
-GEN_VEXT_LD_STRIDE(vlsw_v_d, int32_t, int64_t, ldw_d)
-GEN_VEXT_LD_STRIDE(vlse_v_b, int8_t, int8_t, lde_b)
-GEN_VEXT_LD_STRIDE(vlse_v_h, int16_t, int16_t, lde_h)
-GEN_VEXT_LD_STRIDE(vlse_v_w, int32_t, int32_t, lde_w)
-GEN_VEXT_LD_STRIDE(vlse_v_d, int64_t, int64_t, lde_d)
-GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t, uint8_t, ldbu_b)
-GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t, uint16_t, ldbu_h)
-GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t, uint32_t, ldbu_w)
-GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t, uint64_t, ldbu_d)
-GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h)
-GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w)
-GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d)
-GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w)
-GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d)
-
-#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN) \
+                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b)
+GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h)
+GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w)
+GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d)
+
+#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \
 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   target_ulong stride, CPURISCVState *env, \
                   uint32_t desc) \
 { \
     uint32_t vm = vext_vm(desc); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \
-                     sizeof(ETYPE), sizeof(MTYPE), \
-                     GETPC(), MMU_DATA_STORE); \
-}
-
-GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t, int8_t, stb_b)
-GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t, int16_t, stb_h)
-GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t, int32_t, stb_w)
-GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t, int64_t, stb_d)
-GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h)
-GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w)
-GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d)
-GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w)
-GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d)
-GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t, int8_t, ste_b)
-GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h)
-GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w)
-GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
+                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+}
+
+GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b)
+GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h)
+GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w)
+GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)

 /*
  *** unit-stride: access elements stored contiguously in memory
@@ -335,20 +303,20 @@ GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
 /* unmasked unit-stride load and store operation*/
 static void
 vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
-             vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t msz,
-             uintptr_t ra, MMUAccessType access_type)
+             vext_ldst_elem_fn *ldst_elem,
+             uint32_t esz, uintptr_t ra, MMUAccessType access_type)
 {
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
     uint32_t vlmax = vext_maxsz(desc) / esz;

     /* probe every access */
-    probe_pages(env, base, env->vl * nf * msz, ra, access_type);
+    probe_pages(env, base, env->vl * nf * esz, ra, access_type);
     /* load bytes from guest memory */
     for (i = 0; i < env->vl; i++) {
         k = 0;
         while (k < nf) {
-            target_ulong addr = base + (i * nf + k) * msz;
+            target_ulong addr = base + (i * nf + k) * esz;
             ldst_elem(env, addr, i + k * vlmax, vd, ra);
             k++;
         }
@@ -360,76 +328,47 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
  * stride = NF * sizeof (MTYPE)
  */

-#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN) \
+#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \
 void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
                          CPURISCVState *env, uint32_t desc) \
 { \
-    uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
+    uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
-                     sizeof(ETYPE), sizeof(MTYPE), \
-                     GETPC(), MMU_DATA_LOAD); \
+                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
 } \
 \
 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_us(vd, base, env, desc, LOAD_FN, \
-                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
-}
-
-GEN_VEXT_LD_US(vlb_v_b, int8_t, int8_t, ldb_b)
-GEN_VEXT_LD_US(vlb_v_h, int8_t, int16_t, ldb_h)
-GEN_VEXT_LD_US(vlb_v_w, int8_t, int32_t, ldb_w)
-GEN_VEXT_LD_US(vlb_v_d, int8_t, int64_t, ldb_d)
-GEN_VEXT_LD_US(vlh_v_h, int16_t, int16_t, ldh_h)
-GEN_VEXT_LD_US(vlh_v_w, int16_t, int32_t, ldh_w)
-GEN_VEXT_LD_US(vlh_v_d, int16_t, int64_t, ldh_d)
-GEN_VEXT_LD_US(vlw_v_w, int32_t, int32_t, ldw_w)
-GEN_VEXT_LD_US(vlw_v_d, int32_t, int64_t, ldw_d)
-GEN_VEXT_LD_US(vle_v_b, int8_t, int8_t, lde_b)
-GEN_VEXT_LD_US(vle_v_h, int16_t, int16_t, lde_h)
-GEN_VEXT_LD_US(vle_v_w, int32_t, int32_t, lde_w)
-GEN_VEXT_LD_US(vle_v_d, int64_t, int64_t, lde_d)
-GEN_VEXT_LD_US(vlbu_v_b, uint8_t, uint8_t, ldbu_b)
-GEN_VEXT_LD_US(vlbu_v_h, uint8_t, uint16_t, ldbu_h)
-GEN_VEXT_LD_US(vlbu_v_w, uint8_t, uint32_t, ldbu_w)
-GEN_VEXT_LD_US(vlbu_v_d, uint8_t, uint64_t, ldbu_d)
-GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h)
-GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w)
-GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d)
-GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w)
-GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d)
-
-#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN) \
+                 sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
+GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
+GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
+GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
+
+#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \
 void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
                          CPURISCVState *env, uint32_t desc) \
 { \
-    uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
+    uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
     vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
-                     sizeof(ETYPE), sizeof(MTYPE), \
-                     GETPC(), MMU_DATA_STORE); \
+                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
 } \
 \
 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_us(vd, base, env, desc, STORE_FN, \
-                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
-}
-
-GEN_VEXT_ST_US(vsb_v_b, int8_t, int8_t , stb_b)
-GEN_VEXT_ST_US(vsb_v_h, int8_t, int16_t, stb_h)
-GEN_VEXT_ST_US(vsb_v_w, int8_t, int32_t, stb_w)
-GEN_VEXT_ST_US(vsb_v_d, int8_t, int64_t, stb_d)
-GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h)
-GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w)
-GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d)
-GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w)
-GEN_VEXT_ST_US(vsw_v_d, int32_t, int64_t, stw_d)
-GEN_VEXT_ST_US(vse_v_b, int8_t, int8_t , ste_b)
-GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h)
-GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w)
-GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d)
+                 sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+}
+
+GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
+GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
+GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
+GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)

 /*
  *** index: access vector element from indexed memory
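
For reference, a hand expansion of one of the new helpers, GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w), sketched from the macro body in the diff above (written out by hand, not actual preprocessor output). It shows that, with MTYPE gone, only a single element size, sizeof(ETYPE), is passed down to vext_ldst_stride():

```c
/*
 * Hand expansion of GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w) after this
 * patch, for illustration only.  The RVV 1.0 helper carries one element
 * width, sizeof(int32_t), where the old helpers passed both sizeof(ETYPE)
 * and sizeof(MTYPE).
 */
void HELPER(vlse32_v)(void *vd, void * v0, target_ulong base,
                      target_ulong stride, CPURISCVState *env,
                      uint32_t desc)
{
    uint32_t vm = vext_vm(desc);
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, lde_w,
                     sizeof(int32_t), GETPC(), MMU_DATA_LOAD);
}
```

With esz fixed at sizeof(int32_t) here, probe_pages() checks nf * 4 bytes per element group and each strided address advances by k * 4, matching the EEW=32 encoding of vlse32.v in RVV 1.0.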