summaryrefslogtreecommitdiff
path: root/target/riscv
diff options
context:
space:
mode:
Diffstat (limited to 'target/riscv')
-rw-r--r--target/riscv/cpu.c1
-rw-r--r--target/riscv/cpu.h4
-rw-r--r--target/riscv/cpu_helper.c144
-rw-r--r--target/riscv/csr.c77
-rw-r--r--target/riscv/pmp.c84
-rw-r--r--target/riscv/pmp.h4
-rw-r--r--target/riscv/translate.c179
7 files changed, 209 insertions, 284 deletions
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 2a990f6253..7d6ed80f6b 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -356,6 +356,7 @@ static void riscv_cpu_reset(DeviceState *dev)
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
env->mcause = 0;
env->pc = env->resetvec;
+ env->two_stage_lookup = false;
#endif
cs->exception_index = EXCP_NONE;
env->load_res = -1;
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 0edb2826a2..0a33d387ba 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -213,6 +213,10 @@ struct CPURISCVState {
target_ulong satp_hs;
uint64_t mstatus_hs;
+ /* Signals whether the current exception occurred with two-stage address
+ translation active. */
+ bool two_stage_lookup;
+
target_ulong scounteren;
target_ulong mcounteren;
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 83a6bcfad0..21c54ef561 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -280,6 +280,49 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
env->load_res = -1;
}
+/*
+ * get_physical_address_pmp - check PMP permission for this physical address
+ *
+ * Match the PMP region and check permission for this physical address and its
+ * TLB page. Returns 0 if the permission checking was successful
+ *
+ * @env: CPURISCVState
+ * @prot: The returned protection attributes
+ * @tlb_size: TLB page size containing addr. It could be modified after PMP
+ * permission checking. Pass NULL if no TLB page should be set for addr.
+ * @addr: The physical address to be checked permission
+ * @access_type: The type of MMU access
+ * @mode: Indicates current privilege level.
+ */
+static int get_physical_address_pmp(CPURISCVState *env, int *prot,
+ target_ulong *tlb_size, hwaddr addr,
+ int size, MMUAccessType access_type,
+ int mode)
+{
+ pmp_priv_t pmp_priv;
+ target_ulong tlb_size_pmp = 0;
+
+ if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TRANSLATE_SUCCESS;
+ }
+
+ if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
+ mode)) {
+ *prot = 0;
+ return TRANSLATE_PMP_FAIL;
+ }
+
+ *prot = pmp_priv_to_page_prot(pmp_priv);
+ if (tlb_size != NULL) {
+ if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
+ *tlb_size = tlb_size_pmp;
+ }
+ }
+
+ return TRANSLATE_SUCCESS;
+}
+
/* get_physical_address - get the physical address for this virtual address
*
* Do a page table walk to obtain the physical address corresponding to a
@@ -321,11 +364,15 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
* was called. Background registers will be used if the guest has
* forced a two stage translation to be on (in HS or M mode).
*/
- if (!riscv_cpu_virt_enabled(env) && riscv_cpu_two_stage_lookup(mmu_idx)) {
+ if (!riscv_cpu_virt_enabled(env) && two_stage) {
use_background = true;
}
- if (mode == PRV_M && access_type != MMU_INST_FETCH) {
+ /* MPRV does not affect the virtual-machine load/store
+ instructions, HLV, HLVX, and HSV. */
+ if (riscv_cpu_two_stage_lookup(mmu_idx)) {
+ mode = get_field(env->hstatus, HSTATUS_SPVP);
+ } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
if (get_field(env->mstatus, MSTATUS_MPRV)) {
mode = get_field(env->mstatus, MSTATUS_MPP);
}
@@ -442,9 +489,11 @@ restart:
pte_addr = base + idx * ptesize;
}
- if (riscv_feature(env, RISCV_FEATURE_PMP) &&
- !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
- 1 << MMU_DATA_LOAD, PRV_S)) {
+ int pmp_prot;
+ int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
+ sizeof(target_ulong),
+ MMU_DATA_LOAD, PRV_S);
+ if (pmp_ret != TRANSLATE_SUCCESS) {
return TRANSLATE_PMP_FAIL;
}
@@ -605,6 +654,7 @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
g_assert_not_reached();
}
env->badaddr = address;
+ env->two_stage_lookup = two_stage;
}
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@@ -646,6 +696,8 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
}
env->badaddr = addr;
+ env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
+ riscv_cpu_two_stage_lookup(mmu_idx);
riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}
@@ -669,6 +721,8 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
g_assert_not_reached();
}
env->badaddr = addr;
+ env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
+ riscv_cpu_two_stage_lookup(mmu_idx);
riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif /* !CONFIG_USER_ONLY */
@@ -682,32 +736,32 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
#ifndef CONFIG_USER_ONLY
vaddr im_address;
hwaddr pa = 0;
- int prot, prot2;
+ int prot, prot2, prot_pmp;
bool pmp_violation = false;
bool first_stage_error = true;
bool two_stage_lookup = false;
int ret = TRANSLATE_FAIL;
int mode = mmu_idx;
- target_ulong tlb_size = 0;
+ /* default TLB page size */
+ target_ulong tlb_size = TARGET_PAGE_SIZE;
env->guest_phys_fault_addr = 0;
qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, address, access_type, mmu_idx);
- if (mode == PRV_M && access_type != MMU_INST_FETCH) {
- if (get_field(env->mstatus, MSTATUS_MPRV)) {
- mode = get_field(env->mstatus, MSTATUS_MPP);
+ /* MPRV does not affect the virtual-machine load/store
+ instructions, HLV, HLVX, and HSV. */
+ if (riscv_cpu_two_stage_lookup(mmu_idx)) {
+ mode = get_field(env->hstatus, HSTATUS_SPVP);
+ } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
+ get_field(env->mstatus, MSTATUS_MPRV)) {
+ mode = get_field(env->mstatus, MSTATUS_MPP);
+ if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
+ two_stage_lookup = true;
}
}
- if (riscv_has_ext(env, RVH) && env->priv == PRV_M &&
- access_type != MMU_INST_FETCH &&
- get_field(env->mstatus, MSTATUS_MPRV) &&
- get_field(env->mstatus, MSTATUS_MPV)) {
- two_stage_lookup = true;
- }
-
if (riscv_cpu_virt_enabled(env) ||
((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
access_type != MMU_INST_FETCH)) {
@@ -745,10 +799,16 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
prot &= prot2;
- if (riscv_feature(env, RISCV_FEATURE_PMP) &&
- (ret == TRANSLATE_SUCCESS) &&
- !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
- ret = TRANSLATE_PMP_FAIL;
+ if (ret == TRANSLATE_SUCCESS) {
+ ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
+ size, access_type, mode);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s PMP address=" TARGET_FMT_plx " ret %d prot"
+ " %d tlb_size " TARGET_FMT_lu "\n",
+ __func__, pa, ret, prot_pmp, tlb_size);
+
+ prot &= prot_pmp;
}
if (ret != TRANSLATE_SUCCESS) {
@@ -771,25 +831,27 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
"%s address=%" VADDR_PRIx " ret %d physical "
TARGET_FMT_plx " prot %d\n",
__func__, address, ret, pa, prot);
- }
- if (riscv_feature(env, RISCV_FEATURE_PMP) &&
- (ret == TRANSLATE_SUCCESS) &&
- !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
- ret = TRANSLATE_PMP_FAIL;
+ if (ret == TRANSLATE_SUCCESS) {
+ ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
+ size, access_type, mode);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s PMP address=" TARGET_FMT_plx " ret %d prot"
+ " %d tlb_size " TARGET_FMT_lu "\n",
+ __func__, pa, ret, prot_pmp, tlb_size);
+
+ prot &= prot_pmp;
+ }
}
+
if (ret == TRANSLATE_PMP_FAIL) {
pmp_violation = true;
}
if (ret == TRANSLATE_SUCCESS) {
- if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
- tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
- prot, mmu_idx, tlb_size);
- } else {
- tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
- }
+ tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
+ prot, mmu_idx, tlb_size);
return true;
} else if (probe) {
return false;
@@ -910,16 +972,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* handle the trap in S-mode */
if (riscv_has_ext(env, RVH)) {
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
- bool two_stage_lookup = false;
-
- if (env->priv == PRV_M ||
- (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
- (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
- get_field(env->hstatus, HSTATUS_HU))) {
- two_stage_lookup = true;
- }
- if ((riscv_cpu_virt_enabled(env) || two_stage_lookup) && write_tval) {
+ if (env->two_stage_lookup && write_tval) {
/*
* If we are writing a guest virtual address to stval, set
* this to 1. If we are trapping to VS we will set this to 0
@@ -957,10 +1011,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
riscv_cpu_set_force_hs_excep(env, 0);
} else {
/* Trap into HS mode */
- if (!two_stage_lookup) {
- env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
- riscv_cpu_virt_enabled(env));
- }
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
htval = env->guest_phys_fault_addr;
}
}
@@ -1016,6 +1067,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
* RISC-V ISA Specification.
*/
+ env->two_stage_lookup = false;
#endif
cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index fd2e6363f3..d2585395bf 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -54,7 +54,7 @@ static int vs(CPURISCVState *env, int csrno)
if (env->misa & RVV) {
return 0;
}
- return -1;
+ return -RISCV_EXCP_ILLEGAL_INST;
}
static int ctr(CPURISCVState *env, int csrno)
@@ -420,7 +420,8 @@ static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
-static const target_ulong hip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
+static const target_ulong hip_writable_mask = MIP_VSSIP;
+static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
static const target_ulong vsip_writable_mask = MIP_VSSIP;
static const char valid_vm_1_10_32[16] = {
@@ -748,30 +749,42 @@ static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val)
return write_mstatus(env, CSR_MSTATUS, newval);
}
+static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ /* Shift the VS bits to their S bit location in vsie */
+ *val = (env->mie & env->hideleg & VS_MODE_INTERRUPTS) >> 1;
+ return 0;
+}
+
static int read_sie(CPURISCVState *env, int csrno, target_ulong *val)
{
if (riscv_cpu_virt_enabled(env)) {
- /* Tell the guest the VS bits, shifted to the S bit locations */
- *val = (env->mie & env->mideleg & VS_MODE_INTERRUPTS) >> 1;
+ read_vsie(env, CSR_VSIE, val);
} else {
*val = env->mie & env->mideleg;
}
return 0;
}
-static int write_sie(CPURISCVState *env, int csrno, target_ulong val)
+static int write_vsie(CPURISCVState *env, int csrno, target_ulong val)
{
- target_ulong newval;
+ /* Shift the S bits to their VS bit location in mie */
+ target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) |
+ ((val << 1) & env->hideleg & VS_MODE_INTERRUPTS);
+ return write_mie(env, CSR_MIE, newval);
+}
+static int write_sie(CPURISCVState *env, int csrno, target_ulong val)
+{
if (riscv_cpu_virt_enabled(env)) {
- /* Shift the guests S bits to VS */
- newval = (env->mie & ~VS_MODE_INTERRUPTS) |
- ((val << 1) & VS_MODE_INTERRUPTS);
+ write_vsie(env, CSR_VSIE, val);
} else {
- newval = (env->mie & ~S_MODE_INTERRUPTS) | (val & S_MODE_INTERRUPTS);
+ target_ulong newval = (env->mie & ~S_MODE_INTERRUPTS) |
+ (val & S_MODE_INTERRUPTS);
+ write_mie(env, CSR_MIE, newval);
}
- return write_mie(env, CSR_MIE, newval);
+ return 0;
}
static int read_stvec(CPURISCVState *env, int csrno, target_ulong *val)
@@ -852,17 +865,25 @@ static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val)
return 0;
}
+static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ /* Shift the S bits to their VS bit location in mip */
+ int ret = rmw_mip(env, 0, ret_value, new_value << 1,
+ (write_mask << 1) & vsip_writable_mask & env->hideleg);
+ *ret_value &= VS_MODE_INTERRUPTS;
+ /* Shift the VS bits to their S bit location in vsip */
+ *ret_value >>= 1;
+ return ret;
+}
+
static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask)
{
int ret;
if (riscv_cpu_virt_enabled(env)) {
- /* Shift the new values to line up with the VS bits */
- ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value << 1,
- (write_mask & sip_writable_mask) << 1 & env->mideleg);
- ret &= vsip_writable_mask;
- ret >>= 1;
+ ret = rmw_vsip(env, CSR_VSIP, ret_value, new_value, write_mask);
} else {
ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value,
write_mask & env->mideleg & sip_writable_mask);
@@ -962,9 +983,9 @@ static int rmw_hvip(CPURISCVState *env, int csrno, target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask)
{
int ret = rmw_mip(env, 0, ret_value, new_value,
- write_mask & hip_writable_mask);
+ write_mask & hvip_writable_mask);
- *ret_value &= hip_writable_mask;
+ *ret_value &= hvip_writable_mask;
return ret;
}
@@ -1121,26 +1142,6 @@ static int write_vsstatus(CPURISCVState *env, int csrno, target_ulong val)
return 0;
}
-static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask)
-{
- int ret = rmw_mip(env, 0, ret_value, new_value,
- write_mask & env->mideleg & vsip_writable_mask);
- return ret;
-}
-
-static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val)
-{
- *val = env->mie & env->mideleg & VS_MODE_INTERRUPTS;
- return 0;
-}
-
-static int write_vsie(CPURISCVState *env, int csrno, target_ulong val)
-{
- target_ulong newval = (env->mie & ~env->mideleg) | (val & env->mideleg & MIP_VSSIP);
- return write_mie(env, CSR_MIE, newval);
-}
-
static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env->vstvec;
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 80d0334e1b..cff020122a 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -28,6 +28,7 @@
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
+#include "exec/exec-all.h"
static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
@@ -217,6 +218,35 @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
return result;
}
+/*
+ * Check if the address has required RWX privs when no PMP entry is matched.
+ */
+static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
+ target_ulong mode)
+{
+ bool ret;
+
+ if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
+ /*
+ * Privileged spec v1.10 states if HW doesn't implement any PMP entry
+ * or no PMP entry matches an M-Mode access, the access succeeds.
+ */
+ ret = true;
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ } else {
+ /*
+ * Other modes are not allowed to succeed if they don't match a rule,
+ * but there are rules. We've checked for no rule earlier in this
+ * function.
+ */
+ ret = false;
+ *allowed_privs = 0;
+ }
+
+ return ret;
+}
+
/*
* Public Interface
@@ -226,18 +256,19 @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
* Check if the address has required RWX privs to complete desired operation
*/
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
- target_ulong size, pmp_priv_t privs, target_ulong mode)
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
+ target_ulong mode)
{
int i = 0;
int ret = -1;
int pmp_size = 0;
target_ulong s = 0;
target_ulong e = 0;
- pmp_priv_t allowed_privs = 0;
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
- return (env->priv == PRV_M) ? true : false;
+ return pmp_hart_has_privs_default(env, addr, size, privs,
+ allowed_privs, mode);
}
if (size == 0) {
@@ -277,37 +308,25 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
* check
*/
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
- allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
if ((mode != PRV_M) || pmp_is_locked(env, i)) {
- allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+ *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
}
- if ((privs & allowed_privs) == privs) {
- ret = 1;
- break;
- } else {
- ret = 0;
- break;
- }
+ ret = ((privs & *allowed_privs) == privs);
+ break;
}
}
/* No rule matched */
if (ret == -1) {
- if (mode == PRV_M) {
- ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an
- * M-Mode access, the access succeeds */
- } else {
- ret = 0; /* Other modes are not allowed to succeed if they don't
- * match a rule, but there are rules. We've checked for
- * no rule earlier in this function. */
- }
+ return pmp_hart_has_privs_default(env, addr, size, privs,
+ allowed_privs, mode);
}
return ret == 1 ? true : false;
}
-
/*
* Handle a write to a pmpcfg CSP
*/
@@ -329,6 +348,9 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
cfg_val = (val >> 8 * i) & 0xff;
pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
}
+
+ /* If PMP permission of any addr has been changed, flush TLB pages. */
+ tlb_flush(env_cpu(env));
}
@@ -442,3 +464,23 @@ bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
return false;
}
+
+/*
+ * Convert PMP privilege to TLB page privilege.
+ */
+int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
+{
+ int prot = 0;
+
+ if (pmp_priv & PMP_READ) {
+ prot |= PAGE_READ;
+ }
+ if (pmp_priv & PMP_WRITE) {
+ prot |= PAGE_WRITE;
+ }
+ if (pmp_priv & PMP_EXEC) {
+ prot |= PAGE_EXEC;
+ }
+
+ return prot;
+}
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index c8d5ef4a69..b82a30f0d5 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -59,11 +59,13 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
- target_ulong size, pmp_priv_t priv, target_ulong mode);
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
+ target_ulong mode);
bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
target_ulong *tlb_size);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
uint32_t pmp_get_num_rules(CPURISCVState *env);
+int pmp_priv_to_page_prot(pmp_priv_t pmp_priv);
#endif
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 0f28b5f41e..2f9f5ccc62 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -68,20 +68,6 @@ typedef struct DisasContext {
} DisasContext;
#ifdef TARGET_RISCV64
-/* convert riscv funct3 to qemu memop for load/store */
-static const int tcg_memop_lookup[8] = {
- [0 ... 7] = -1,
- [0] = MO_SB,
- [1] = MO_TESW,
- [2] = MO_TESL,
- [3] = MO_TEQ,
- [4] = MO_UB,
- [5] = MO_TEUW,
- [6] = MO_TEUL,
-};
-#endif
-
-#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
@@ -374,48 +360,6 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
ctx->base.is_jmp = DISAS_NORETURN;
}
-#ifdef TARGET_RISCV64
-static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1,
- target_long imm)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_get_gpr(t0, rs1);
- tcg_gen_addi_tl(t0, t0, imm);
- int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
-
- if (memop < 0) {
- gen_exception_illegal(ctx);
- return;
- }
-
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
- gen_set_gpr(rd, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-}
-
-static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
- target_long imm)
-{
- TCGv t0 = tcg_temp_new();
- TCGv dat = tcg_temp_new();
- gen_get_gpr(t0, rs1);
- tcg_gen_addi_tl(t0, t0, imm);
- gen_get_gpr(dat, rs2);
- int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
-
- if (memop < 0) {
- gen_exception_illegal(ctx);
- return;
- }
-
- tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
- tcg_temp_free(t0);
- tcg_temp_free(dat);
-}
-#endif
-
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
* 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
@@ -447,83 +391,6 @@ static void mark_fs_dirty(DisasContext *ctx)
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
-#if !defined(TARGET_RISCV64)
-static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
- int rs1, target_long imm)
-{
- TCGv t0;
-
- if (ctx->mstatus_fs == 0) {
- gen_exception_illegal(ctx);
- return;
- }
-
- t0 = tcg_temp_new();
- gen_get_gpr(t0, rs1);
- tcg_gen_addi_tl(t0, t0, imm);
-
- switch (opc) {
- case OPC_RISC_FLW:
- if (!has_ext(ctx, RVF)) {
- goto do_illegal;
- }
- tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
- /* RISC-V requires NaN-boxing of narrower width floating point values */
- tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
- break;
- case OPC_RISC_FLD:
- if (!has_ext(ctx, RVD)) {
- goto do_illegal;
- }
- tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
- break;
- do_illegal:
- default:
- gen_exception_illegal(ctx);
- break;
- }
- tcg_temp_free(t0);
-
- mark_fs_dirty(ctx);
-}
-
-static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
- int rs2, target_long imm)
-{
- TCGv t0;
-
- if (ctx->mstatus_fs == 0) {
- gen_exception_illegal(ctx);
- return;
- }
-
- t0 = tcg_temp_new();
- gen_get_gpr(t0, rs1);
- tcg_gen_addi_tl(t0, t0, imm);
-
- switch (opc) {
- case OPC_RISC_FSW:
- if (!has_ext(ctx, RVF)) {
- goto do_illegal;
- }
- tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
- break;
- case OPC_RISC_FSD:
- if (!has_ext(ctx, RVD)) {
- goto do_illegal;
- }
- tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
- break;
- do_illegal:
- default:
- gen_exception_illegal(ctx);
- break;
- }
-
- tcg_temp_free(t0);
-}
-#endif
-
static void gen_set_rm(DisasContext *ctx, int rm)
{
TCGv_i32 t0;
@@ -537,49 +404,6 @@ static void gen_set_rm(DisasContext *ctx, int rm)
tcg_temp_free_i32(t0);
}
-static void decode_RV32_64C0(DisasContext *ctx, uint16_t opcode)
-{
- uint8_t funct3 = extract16(opcode, 13, 3);
- uint8_t rd_rs2 = GET_C_RS2S(opcode);
- uint8_t rs1s = GET_C_RS1S(opcode);
-
- switch (funct3) {
- case 3:
-#if defined(TARGET_RISCV64)
- /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
- gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s,
- GET_C_LD_IMM(opcode));
-#else
- /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
- gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
- GET_C_LW_IMM(opcode));
-#endif
- break;
- case 7:
-#if defined(TARGET_RISCV64)
- /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
- gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2,
- GET_C_LD_IMM(opcode));
-#else
- /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
- gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
- GET_C_LW_IMM(opcode));
-#endif
- break;
- }
-}
-
-static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode)
-{
- uint8_t op = extract16(opcode, 0, 2);
-
- switch (op) {
- case 0:
- decode_RV32_64C0(ctx, opcode);
- break;
- }
-}
-
static int ex_plus_1(DisasContext *ctx, int nf)
{
return nf + 1;
@@ -779,8 +603,7 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
} else {
ctx->pc_succ_insn = ctx->base.pc_next + 2;
if (!decode_insn16(ctx, opcode)) {
- /* fall back to old decoder */
- decode_RV32_64C(ctx, opcode);
+ gen_exception_illegal(ctx);
}
}
} else {