author    Peter Maydell <peter.maydell@linaro.org>    2020-02-14 15:10:33 +0000
committer Peter Maydell <peter.maydell@linaro.org>    2020-02-14 15:10:33 +0000
commit    bc882694a3c757e7bd95c90e21b048d347ba9244 (patch)
tree      10b14a50c3874e1abbe71b91662186abe24b69fe /target
parent    517c84cef759a453cfb8f51498aebc909a5f3b39 (diff)
parent    dc7a88d0810ad272bdcd2e0869359af78fdd9114 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200213' into staging
target-arm queue:
 * i.MX: Fix inverted sense of register bits in watchdog timer
 * i.MX: Add support for WDT on i.MX6
 * arm/virt: cleanups to ACPI tables
 * Implement ARMv8.1-VMID16 extension
 * Implement ARMv8.1-PAN
 * Implement ARMv8.2-UAO
 * Implement ARMv8.2-ATS1E1
 * ast2400/2500/2600: Wire up EHCI controllers
 * hw/char/exynos4210_uart: Fix memleaks in exynos4210_uart_init
 * hw/arm/raspi: Clean up the board code

# gpg: Signature made Thu 13 Feb 2020 14:40:34 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20200213: (46 commits)
  target/arm: Implement ARMv8.1-VMID16 extension
  hw/arm/raspi: Extract the cores count from the board revision
  hw/arm/raspi: Use a unique raspi_machine_class_init() method
  hw/arm/raspi: Extract the board model from the board revision
  hw/arm/raspi: Set default RAM size to size encoded in board revision
  hw/arm/raspi: Let class_init() directly call raspi_machine_init()
  hw/arm/raspi: Make board_rev a field of RaspiMachineClass
  hw/arm/raspi: Make machines children of abstract RaspiMachineClass
  hw/arm/raspi: Trivial code movement
  hw/arm/raspi: Extract the processor type from the board revision
  hw/arm/raspi: Extract the RAM size from the board revision
  hw/arm/raspi: Extract the version from the board revision
  hw/arm/raspi: Correct the board descriptions
  hw/arm/raspi: Use BCM2708 machine type with pre Device Tree kernels
  hw/char/exynos4210_uart: Fix memleaks in exynos4210_uart_init
  hw/arm: ast2600: Wire up EHCI controllers
  hw/arm: ast2400/ast2500: Wire up EHCI controllers
  target/arm: Enable ARMv8.2-UAO in -cpu max
  target/arm: Implement UAO semantics
  target/arm: Update MSR access to UAO
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
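In brief, ARMv8.1-PAN makes privileged loads and stores to user-accessible pages fault while PSTATE.PAN is set. The series implements the permission check in get_S1prot() (see the helper.c hunk below); a condensed sketch, reusing the regime_is_pan() helper that internals.h gains in this merge:

    /* Condensed from the get_S1prot() change below: if the page grants
     * any EL0 access and the translation regime is a PAN variant,
     * privileged reads and writes get no permissions at all.
     */
    if (user_rw && regime_is_pan(env, mmu_idx)) {
        return 0;
    }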
Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu-param.h       2
-rw-r--r--  target/arm/cpu.c             4
-rw-r--r--  target/arm/cpu.h            95
-rw-r--r--  target/arm/cpu64.c          10
-rw-r--r--  target/arm/helper-a64.c      6
-rw-r--r--  target/arm/helper.c        327
-rw-r--r--  target/arm/internals.h      85
-rw-r--r--  target/arm/kvm64.c           2
-rw-r--r--  target/arm/op_helper.c      14
-rw-r--r--  target/arm/translate-a64.c  31
-rw-r--r--  target/arm/translate.c      42
11 files changed, 500 insertions, 118 deletions
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index 18ac562346..d593b60b28 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -29,6 +29,6 @@
# define TARGET_PAGE_BITS_MIN 10
#endif
-#define NB_MMU_MODES 9
+#define NB_MMU_MODES 12
#endif
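The bump from 9 to 12 accounts for the three new PAN TLB indexes (E10_1_PAN, E20_2_PAN, SE10_1_PAN) introduced in cpu.h below. A hypothetical compile-time check, not part of the patch, makes the dependency explicit:

    /* Sketch only: the highest A-profile core index added below is
     * ARMMMUIdx_Stage2 == 11, so the softmmu TLB needs 12 modes.
     */
    QEMU_BUILD_BUG_ON(NB_MMU_MODES < 12);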
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index b0762a76c4..de733aceeb 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2709,6 +2709,10 @@ static void arm_max_initfn(Object *obj)
t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
cpu->isar.mvfr2 = t;
+ t = cpu->id_mmfr3;
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
+ cpu->id_mmfr3 = t;
+
t = cpu->id_mmfr4;
t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
cpu->id_mmfr4 = t;
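FIELD_DP32() deposits a value into the named field of an ID register. Given the FIELD(ID_MMFR3, PAN, 16, 4) definition added in cpu.h, the write above expands to roughly the following (a sketch, assuming the usual registerfields.h semantics):

    /* Deposit 2 into ID_MMFR3 bits [19:16]; PAN >= 2 advertises ATS1E1. */
    t = (t & ~(0xfu << 16)) | (2u << 16);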
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 0b3036c484..e943ffe8a9 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -871,6 +871,7 @@ struct ARMCPU {
uint64_t id_aa64pfr1;
uint64_t id_aa64mmfr0;
uint64_t id_aa64mmfr1;
+ uint64_t id_aa64mmfr2;
} isar;
uint32_t midr;
uint32_t revidr;
@@ -1186,12 +1187,7 @@ void pmu_init(ARMCPU *cpu);
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
-/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
- * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
- * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
- * where it is live state but not accessible to the AArch32 code.
- */
-#define CPSR_RESERVED (0x7U << 21)
+#define CPSR_PAN (1U << 22)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
@@ -1209,8 +1205,6 @@ void pmu_init(ARMCPU *cpu);
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits. MRS read as zero, MSR writes ignored. */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
-/* Mask of bits which may be set by exception return copying them from SPSR */
-#define CPSR_ERET_MASK (~CPSR_RESERVED)
/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
#define XPSR_EXCP 0x1ffU
@@ -1258,6 +1252,8 @@ void pmu_init(ARMCPU *cpu);
#define PSTATE_BTYPE (3U << 10)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
+#define PSTATE_PAN (1U << 22)
+#define PSTATE_UAO (1U << 23)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
@@ -1727,6 +1723,15 @@ FIELD(ID_ISAR6, FHM, 8, 4)
FIELD(ID_ISAR6, SB, 12, 4)
FIELD(ID_ISAR6, SPECRES, 16, 4)
+FIELD(ID_MMFR3, CMAINTVA, 0, 4)
+FIELD(ID_MMFR3, CMAINTSW, 4, 4)
+FIELD(ID_MMFR3, BPMAINT, 8, 4)
+FIELD(ID_MMFR3, MAINTBCST, 12, 4)
+FIELD(ID_MMFR3, PAN, 16, 4)
+FIELD(ID_MMFR3, COHWALK, 20, 4)
+FIELD(ID_MMFR3, CMEMSZ, 24, 4)
+FIELD(ID_MMFR3, SUPERSEC, 28, 4)
+
FIELD(ID_MMFR4, SPECSEI, 0, 4)
FIELD(ID_MMFR4, AC2, 4, 4)
FIELD(ID_MMFR4, XNX, 8, 4)
@@ -1800,6 +1805,22 @@ FIELD(ID_AA64MMFR1, PAN, 20, 4)
FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
FIELD(ID_AA64MMFR1, XNX, 28, 4)
+FIELD(ID_AA64MMFR2, CNP, 0, 4)
+FIELD(ID_AA64MMFR2, UAO, 4, 4)
+FIELD(ID_AA64MMFR2, LSM, 8, 4)
+FIELD(ID_AA64MMFR2, IESB, 12, 4)
+FIELD(ID_AA64MMFR2, VARANGE, 16, 4)
+FIELD(ID_AA64MMFR2, CCIDX, 20, 4)
+FIELD(ID_AA64MMFR2, NV, 24, 4)
+FIELD(ID_AA64MMFR2, ST, 28, 4)
+FIELD(ID_AA64MMFR2, AT, 32, 4)
+FIELD(ID_AA64MMFR2, IDS, 36, 4)
+FIELD(ID_AA64MMFR2, FWB, 40, 4)
+FIELD(ID_AA64MMFR2, TTL, 48, 4)
+FIELD(ID_AA64MMFR2, BBM, 52, 4)
+FIELD(ID_AA64MMFR2, EVT, 56, 4)
+FIELD(ID_AA64MMFR2, E0PD, 60, 4)
+
FIELD(ID_DFR0, COPDBG, 0, 4)
FIELD(ID_DFR0, COPSDBG, 4, 4)
FIELD(ID_DFR0, MMAPDBG, 8, 4)
@@ -2751,20 +2772,24 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* 5. we want to be able to use the TLB for accesses done as part of a
* stage1 page table walk, rather than having to walk the stage2 page
* table over and over.
+ * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
+ * Never (PAN) bit within PSTATE.
*
* This gives us the following list of cases:
*
* NS EL0 EL1&0 stage 1+2 (aka NS PL0)
* NS EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * NS EL1 EL1&0 stage 1+2 +PAN
* NS EL0 EL2&0
* NS EL2 EL2&0
+ NS EL2 EL2&0 +PAN
* NS EL2 (aka NS PL2)
* S EL0 EL1&0 (aka S PL0)
* S EL1 EL1&0 (not used if EL3 is 32 bit)
+ * S EL1 EL1&0 +PAN
* S EL3 (aka S PL1)
* NS EL1&0 stage 2
*
- * for a total of 9 different mmu_idx.
+ * for a total of 12 different mmu_idx.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -2819,19 +2844,22 @@ typedef enum ARMMMUIdx {
/*
* A-profile.
*/
- ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_E2 = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2 = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E2 = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2 = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_PAN = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE10_0 = 5 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE10_1 = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE10_0 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE10_1 = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE10_1_PAN = 9 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE3 = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
/*
* These are not allocated TLBs and are used only for AT system
@@ -2839,6 +2867,7 @@ typedef enum ARMMMUIdx {
*/
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
/*
* M-profile.
@@ -2864,10 +2893,13 @@ typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E10_0),
TO_CORE_BIT(E20_0),
TO_CORE_BIT(E10_1),
+ TO_CORE_BIT(E10_1_PAN),
TO_CORE_BIT(E2),
TO_CORE_BIT(E20_2),
+ TO_CORE_BIT(E20_2_PAN),
TO_CORE_BIT(SE10_0),
TO_CORE_BIT(SE10_1),
+ TO_CORE_BIT(SE10_1_PAN),
TO_CORE_BIT(SE3),
TO_CORE_BIT(Stage2),
@@ -3432,6 +3464,16 @@ static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 4;
}
+static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->mvfr0, ID_MMFR3, PAN) != 0;
+}
+
+static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->mvfr0, ID_MMFR3, PAN) >= 2;
+}
+
/*
* 64-bit feature tests via id registers.
*/
@@ -3591,6 +3633,21 @@ static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
}
+static inline bool isar_feature_aa64_pan(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0;
+}
+
+static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
+}
+
+static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
+}
+
static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
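These predicates are consumed through the cpu_isar_feature() macro rather than ARM_FEATURE_* flags; the helper.c hunks below use them exactly this way. A minimal usage sketch:

    /* Register the PAN pseudo-register only when the ID registers
     * advertise ARMv8.1-PAN.
     */
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }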
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index c80fb5fd43..f0d98bc79d 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -673,8 +673,14 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
+ t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
+ t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
cpu->isar.id_aa64mmfr1 = t;
+ t = cpu->isar.id_aa64mmfr2;
+ t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
+ cpu->isar.id_aa64mmfr2 = t;
+
/* Replicate the same data to the 32-bit id registers. */
u = cpu->isar.id_isar5;
u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
@@ -693,6 +699,10 @@ static void aarch64_max_initfn(Object *obj)
u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
cpu->isar.id_isar6 = u;
+ u = cpu->id_mmfr3;
+ u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
+ cpu->id_mmfr3 = u;
+
/*
* FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet,
* so do not set MVFR1.FPHP. Strictly speaking this is not legal,
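VMIDBITS == 2 advertises 16-bit VMIDs (ARMv8.1-VMID16). A consumer would test it with a predicate in the style of the cpu.h helpers above; the helper name below is an assumption, as this diff does not show it:

    /* Hypothetical feature test for ARMv8.1-VMID16. */
    static inline bool isar_feature_aa64_vmid16(const ARMISARegisters *id)
    {
        return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VMIDBITS) >= 2;
    }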
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index bf45f8a785..509ae93069 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -959,7 +959,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
{
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
- uint32_t spsr = env->banked_spsr[spsr_idx];
+ uint32_t mask, spsr = env->banked_spsr[spsr_idx];
int new_el;
bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
@@ -1014,7 +1014,8 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
* will sort the register banks out for us, and we've already
* caught all the bad-mode cases in el_from_spsr().
*/
- cpsr_write(env, spsr, ~0, CPSRWriteRaw);
+ mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
+ cpsr_write(env, spsr, mask, CPSRWriteRaw);
if (!arm_singlestep_active(env)) {
env->uncached_cpsr &= ~PSTATE_SS;
}
@@ -1031,6 +1032,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
cur_el, new_el, env->regs[15]);
} else {
env->aarch64 = 1;
+ spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
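The net effect is that SPSR bits which are not architecturally valid for the target state (for example PSTATE.PAN on a CPU without ARMv8.1-PAN) are dropped instead of being copied into PSTATE by ERET. Condensed, for the AArch64 return path:

    uint32_t spsr = env->banked_spsr[spsr_idx];
    /* Drop RES0 bits before they can reach live PSTATE state. */
    spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
    pstate_write(env, spsr);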
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7d15d5c933..366dbcf460 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -671,6 +671,7 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0 |
ARMMMUIdxBit_Stage2);
}
@@ -682,6 +683,7 @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_by_mmuidx_all_cpus_synced(cs,
ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0 |
ARMMMUIdxBit_Stage2);
}
@@ -2700,6 +2702,7 @@ static int gt_phys_redir_timeridx(CPUARMState *env)
switch (arm_mmu_idx(env)) {
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
return GTIMER_HYP;
default:
return GTIMER_PHYS;
@@ -2711,6 +2714,7 @@ static int gt_virt_redir_timeridx(CPUARMState *env)
switch (arm_mmu_idx(env)) {
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
return GTIMER_HYPVIRT;
default:
return GTIMER_VIRT;
@@ -3261,8 +3265,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
bool take_exc = false;
if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
- && (mmu_idx == ARMMMUIdx_Stage1_E1 ||
- mmu_idx == ARMMMUIdx_Stage1_E0)) {
+ && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
/*
* Synchronous stage 2 fault on an access made as part of the
* translation table walk for AT S1E0* or AT S1E1* insn
@@ -3338,7 +3341,9 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
if (arm_feature(env, ARM_FEATURE_EL2)) {
- if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
+ if (mmu_idx == ARMMMUIdx_E10_0 ||
+ mmu_idx == ARMMMUIdx_E10_1 ||
+ mmu_idx == ARMMMUIdx_E10_1_PAN) {
format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
} else {
format64 |= arm_current_el(env) == 2;
@@ -3404,16 +3409,21 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
switch (ri->opc2 & 6) {
case 0:
- /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
+ /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
switch (el) {
case 3:
mmu_idx = ARMMMUIdx_SE3;
break;
case 2:
- mmu_idx = ARMMMUIdx_Stage1_E1;
- break;
+ g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */
+ /* fall through */
case 1:
- mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
+ if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
+ mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
+ : ARMMMUIdx_Stage1_E1_PAN);
+ } else {
+ mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
+ }
break;
default:
g_assert_not_reached();
@@ -3482,8 +3492,13 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
switch (ri->opc2 & 6) {
case 0:
switch (ri->opc1) {
- case 0: /* AT S1E1R, AT S1E1W */
- mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
+ case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
+ if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
+ mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
+ : ARMMMUIdx_Stage1_E1_PAN);
+ } else {
+ mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
+ }
break;
case 4: /* AT S1E2R, AT S1E2W */
mmu_idx = ARMMMUIdx_E2;
@@ -3798,7 +3813,9 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
(arm_hcr_el2_eff(env) & HCR_E2H)) {
tlb_flush_by_mmuidx(env_cpu(env),
- ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_0);
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E20_0);
}
raw_write(env, ri, value);
}
@@ -3816,6 +3833,7 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (raw_read(env, ri) != value) {
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0 |
ARMMMUIdxBit_Stage2);
raw_write(env, ri, value);
@@ -4155,6 +4173,42 @@ static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
env->daif = value & PSTATE_DAIF;
}
+static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_PAN;
+}
+
+static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
+}
+
+static const ARMCPRegInfo pan_reginfo = {
+ .name = "PAN", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_RW,
+ .readfn = aa64_pan_read, .writefn = aa64_pan_write
+};
+
+static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_UAO;
+}
+
+static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
+}
+
+static const ARMCPRegInfo uao_reginfo = {
+ .name = "UAO", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL1_RW,
+ .readfn = aa64_uao_read, .writefn = aa64_uao_write
+};
+
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
const ARMCPRegInfo *ri,
bool isread)
@@ -4176,12 +4230,18 @@ static int vae1_tlbmask(CPUARMState *env)
{
/* Since we exclude secure first, we may read HCR_EL2 directly. */
if (arm_is_secure_below_el3(env)) {
- return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_0;
+ return ARMMMUIdxBit_SE10_1 |
+ ARMMMUIdxBit_SE10_1_PAN |
+ ARMMMUIdxBit_SE10_0;
} else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
== (HCR_E2H | HCR_TGE)) {
- return ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_0;
+ return ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E20_0;
} else {
- return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
+ return ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0;
}
}
@@ -4215,18 +4275,28 @@ static int alle1_tlbmask(CPUARMState *env)
* stage 1 translations.
*/
if (arm_is_secure_below_el3(env)) {
- return ARMMMUIdxBit_SE10_1 | ARMMMUIdxBit_SE10_0;
+ return ARMMMUIdxBit_SE10_1 |
+ ARMMMUIdxBit_SE10_1_PAN |
+ ARMMMUIdxBit_SE10_0;
} else if (arm_feature(env, ARM_FEATURE_EL2)) {
- return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0 | ARMMMUIdxBit_Stage2;
+ return ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_Stage2;
} else {
- return ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_0;
+ return ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0;
}
}
static int e2_tlbmask(CPUARMState *env)
{
/* TODO: ARMv8.4-SecEL2 */
- return ARMMMUIdxBit_E20_0 | ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E2;
+ return ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E2;
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -6310,6 +6380,35 @@ static CPAccessResult access_lor_other(CPUARMState *env,
return access_lor_ns(env);
}
+/*
+ * A trivial implementation of ARMv8.1-LOR leaves all of these
+ * registers fixed at 0, which indicates that there are zero
+ * supported Limited Ordering regions.
+ */
+static const ARMCPRegInfo lor_reginfo[] = {
+ { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .accessfn = access_lorid,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
@@ -6612,6 +6711,32 @@ static const ARMCPRegInfo vhe_reginfo[] = {
REGINFO_SENTINEL
};
+#ifndef CONFIG_USER_ONLY
+static const ARMCPRegInfo ats1e1_reginfo[] = {
+ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo ats1cp_reginfo[] = {
+ { .name = "ATS1CPRP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+ { .name = "ATS1CPWP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+ REGINFO_SENTINEL
+};
+#endif
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
@@ -6966,11 +7091,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.id_aa64mmfr1 },
- { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = 0 },
+ .resetvalue = cpu->isar.id_aa64mmfr2 },
{ .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7544,36 +7669,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
if (cpu_isar_feature(aa64_lor, cpu)) {
- /*
- * A trivial implementation of ARMv8.1-LOR leaves all of these
- * registers fixed at 0, which indicates that there are zero
- * supported Limited Ordering regions.
- */
- static const ARMCPRegInfo lor_reginfo[] = {
- { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
- .access = PL1_RW, .accessfn = access_lor_other,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_lor_other,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
- .access = PL1_RW, .accessfn = access_lor_other,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
- .access = PL1_RW, .accessfn = access_lor_other,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
- .access = PL1_R, .accessfn = access_lorid,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- REGINFO_SENTINEL
- };
define_arm_cp_regs(cpu, lor_reginfo);
}
+ if (cpu_isar_feature(aa64_pan, cpu)) {
+ define_one_arm_cp_reg(cpu, &pan_reginfo);
+ }
+#ifndef CONFIG_USER_ONLY
+ if (cpu_isar_feature(aa64_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1e1_reginfo);
+ }
+ if (cpu_isar_feature(aa32_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1cp_reginfo);
+ }
+#endif
+ if (cpu_isar_feature(aa64_uao, cpu)) {
+ define_one_arm_cp_reg(cpu, &uao_reginfo);
+ }
if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
define_arm_cp_regs(cpu, vhe_reginfo);
@@ -8717,8 +8828,12 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
uint32_t mask, uint32_t offset,
uint32_t newpc)
{
+ int new_el;
+
/* Change the CPU state so as to actually take the exception. */
switch_mode(env, new_mode);
+ new_el = arm_current_el(env);
+
/*
* For exceptions taken to AArch32 we must clear the SS bit in both
* PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
@@ -8731,7 +8846,7 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
/* Set new mode endianness */
env->uncached_cpsr &= ~CPSR_E;
- if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
+ if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
env->uncached_cpsr |= CPSR_E;
}
/* J and IL must always be cleared for exception entry */
@@ -8742,6 +8857,25 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
env->elr_el[2] = env->regs[15];
} else {
+ /* CPSR.PAN is normally preserved unless... */
+ if (cpu_isar_feature(aa64_pan, env_archcpu(env))) {
+ switch (new_el) {
+ case 3:
+ if (!arm_is_secure_below_el3(env)) {
+ /* ... the target is EL3, from non-secure state. */
+ env->uncached_cpsr &= ~CPSR_PAN;
+ break;
+ }
+ /* ... the target is EL3, from secure state ... */
+ /* fall through */
+ case 1:
+ /* ... the target is EL1 and SCTLR.SPAN is 0. */
+ if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
+ env->uncached_cpsr |= CPSR_PAN;
+ }
+ break;
+ }
+ }
/*
* this is a lie, as there was no c1_sys on V4T/V5, but who cares
* and we should just guard the thumb mode on V4
@@ -9004,6 +9138,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
unsigned int new_el = env->exception.target_el;
target_ulong addr = env->cp15.vbar_el[new_el];
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
+ unsigned int old_mode;
unsigned int cur_el = arm_current_el(env);
/*
@@ -9083,20 +9218,43 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
}
if (is_a64(env)) {
- env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
+ old_mode = pstate_read(env);
aarch64_save_sp(env, arm_current_el(env));
env->elr_el[new_el] = env->pc;
} else {
- env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
+ old_mode = cpsr_read(env);
env->elr_el[new_el] = env->regs[15];
aarch64_sync_32_to_64(env);
env->condexec_bits = 0;
}
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
+
qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
env->elr_el[new_el]);
+ if (cpu_isar_feature(aa64_pan, cpu)) {
+ /* The value of PSTATE.PAN is normally preserved, except when ... */
+ new_mode |= old_mode & PSTATE_PAN;
+ switch (new_el) {
+ case 2:
+ /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
+ if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
+ != (HCR_E2H | HCR_TGE)) {
+ break;
+ }
+ /* fall through */
+ case 1:
+ /* ... the target is EL1 ... */
+ /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
+ if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
+ new_mode |= PSTATE_PAN;
+ }
+ break;
+ }
+ }
+
pstate_write(env, PSTATE_DAIF | new_mode);
env->aarch64 = 1;
aarch64_restore_sp(env, new_el);
@@ -9207,6 +9365,7 @@ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_Stage2:
case ARMMMUIdx_E2:
return 2;
@@ -9215,10 +9374,13 @@ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_SE10_0:
return arm_el_is_aa64(env, 3) ? 1 : 3;
case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_MPrivNegPri:
case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
@@ -9285,8 +9447,7 @@ static inline bool regime_translation_disabled(CPUARMState *env,
}
}
- if ((env->cp15.hcr_el2 & HCR_DC) &&
- (mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1)) {
+ if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
return true;
}
@@ -9335,6 +9496,8 @@ static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
return ARMMMUIdx_Stage1_E0;
case ARMMMUIdx_E10_1:
return ARMMMUIdx_Stage1_E1;
+ case ARMMMUIdx_E10_1_PAN:
+ return ARMMMUIdx_Stage1_E1_PAN;
default:
return mmu_idx;
}
@@ -9381,6 +9544,7 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
return false;
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
g_assert_not_reached();
}
}
@@ -9517,6 +9681,9 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
if (is_user) {
prot_rw = user_rw;
} else {
+ if (user_rw && regime_is_pan(env, mmu_idx)) {
+ return 0;
+ }
prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
}
@@ -9595,7 +9762,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr addr, MemTxAttrs txattrs,
ARMMMUFaultInfo *fi)
{
- if ((mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1) &&
+ if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
!regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
target_ulong s2size;
hwaddr s2pa;
@@ -11273,7 +11440,9 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
target_ulong *page_size,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
- if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1) {
+ if (mmu_idx == ARMMMUIdx_E10_0 ||
+ mmu_idx == ARMMMUIdx_E10_1 ||
+ mmu_idx == ARMMMUIdx_E10_1_PAN) {
/* Call ourselves recursively to do the stage 1 and then stage 2
* translations.
*/
@@ -11800,10 +11969,13 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
case ARMMMUIdx_SE10_0:
return 0;
case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
return 1;
case ARMMMUIdx_E2:
case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
return 2;
case ARMMMUIdx_SE3:
return 3;
@@ -11838,13 +12010,22 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
return ARMMMUIdx_E10_0;
case 1:
if (arm_is_secure_below_el3(env)) {
+ if (env->pstate & PSTATE_PAN) {
+ return ARMMMUIdx_SE10_1_PAN;
+ }
return ARMMMUIdx_SE10_1;
}
+ if (env->pstate & PSTATE_PAN) {
+ return ARMMMUIdx_E10_1_PAN;
+ }
return ARMMMUIdx_E10_1;
case 2:
/* TODO: ARMv8.4-SecEL2 */
/* Note that TGE does not apply at EL2. */
if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
+ if (env->pstate & PSTATE_PAN) {
+ return ARMMMUIdx_E20_2_PAN;
+ }
return ARMMMUIdx_E20_2;
}
return ARMMMUIdx_E2;
@@ -12017,25 +12198,29 @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
/* Compute the condition for using AccType_UNPRIV for LDTR et al. */
- /* TODO: ARMv8.2-UAO */
- switch (mmu_idx) {
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_SE10_1:
- /* TODO: ARMv8.3-NV */
- flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
- break;
- case ARMMMUIdx_E20_2:
- /* TODO: ARMv8.4-SecEL2 */
- /*
- * Note that E20_2 is gated by HCR_EL2.E2H == 1, but E20_0 is
- * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
- */
- if (env->cp15.hcr_el2 & HCR_TGE) {
+ if (!(env->pstate & PSTATE_UAO)) {
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ /* TODO: ARMv8.3-NV */
flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
+ break;
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ /* TODO: ARMv8.4-SecEL2 */
+ /*
+ * Note that E20_2 is gated by HCR_EL2.E2H == 1, but E20_0 is
+ * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
+ */
+ if (env->cp15.hcr_el2 & HCR_TGE) {
+ flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
}
return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
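The AArch64 exception-entry rule above compresses to: PSTATE.PAN is inherited from the interrupted context, then forced to 1 on entry to EL1, or to EL2 with HCR_EL2.{E2H,TGE} == '11', whenever SCTLR_ELx.SPAN is 0. A condensed restatement (el1_like is shorthand for that target condition, not a real helper):

    new_mode |= old_mode & PSTATE_PAN;       /* normally preserved */
    if (el1_like /* EL1, or EL2 with E2H+TGE */
        && !(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
        new_mode |= PSTATE_PAN;              /* SPAN == 0 forces PAN on */
    }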
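Likewise, the rebuild_hflags_a64() change encodes the UAO semantics: LDTR/STTR take AccType_UNPRIV only while PSTATE.UAO is clear. A hypothetical standalone predicate mirroring the switch above:

    /* Not in the patch: restates when unprivileged-access insns
     * really act unprivileged.
     */
    static bool a64_unpriv_active(CPUARMState *env, ARMMMUIdx mmu_idx)
    {
        if (env->pstate & PSTATE_UAO) {
            return false;                    /* UAO: act privileged */
        }
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            return true;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /* E20_0, and hence LDTR, requires HCR_EL2.<E2H,TGE> == '11'. */
            return env->cp15.hcr_el2 & HCR_TGE;
        default:
            return false;
        }
    }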
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 6d4a942bde..58c4d707c5 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -843,12 +843,16 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
return true;
default:
return false;
@@ -861,10 +865,13 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E2:
case ARMMMUIdx_Stage2:
case ARMMMUIdx_MPrivNegPri:
@@ -875,6 +882,7 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_SE3:
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
case ARMMMUIdx_MSPrivNegPri:
case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
@@ -885,6 +893,19 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
}
}
+static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE10_1_PAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* Return the FSR value for a debug exception (watchpoint, hardware
* breakpoint or BKPT insn) targeting the specified exception level.
*/
@@ -1034,6 +1055,70 @@ static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
+/**
+ * arm_mmu_idx_is_stage1_of_2:
+ * @mmu_idx: The ARMMMUIdx to test
+ *
+ * Return true if @mmu_idx is a NOTLB mmu_idx that is the
+ * first stage of a two stage regime.
+ */
+static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
+ const ARMISARegisters *id)
+{
+ uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;
+
+ if ((features >> ARM_FEATURE_V4T) & 1) {
+ valid |= CPSR_T;
+ }
+ if ((features >> ARM_FEATURE_V5) & 1) {
+ valid |= CPSR_Q; /* V5TE in reality */
+ }
+ if ((features >> ARM_FEATURE_V6) & 1) {
+ valid |= CPSR_E | CPSR_GE;
+ }
+ if ((features >> ARM_FEATURE_THUMB2) & 1) {
+ valid |= CPSR_IT;
+ }
+ if (isar_feature_jazelle(id)) {
+ valid |= CPSR_J;
+ }
+ if (isar_feature_aa32_pan(id)) {
+ valid |= CPSR_PAN;
+ }
+
+ return valid;
+}
+
+static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
+{
+ uint32_t valid;
+
+ valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
+ if (isar_feature_aa64_bti(id)) {
+ valid |= PSTATE_BTYPE;
+ }
+ if (isar_feature_aa64_pan(id)) {
+ valid |= PSTATE_PAN;
+ }
+ if (isar_feature_aa64_uao(id)) {
+ valid |= PSTATE_UAO;
+ }
+
+ return valid;
+}
+
/*
* Parameters of a given virtual address, as extracted from the
* translation control register (TCR) for a given regime.
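Callers pass the CPU's feature word and ID registers; the op_helper.c hunk below uses this to sanitize a 32-bit exception return. A minimal sketch:

    /* Reduce a guest-controlled SPSR image to architecturally
     * writable bits before committing it to the CPSR.
     */
    uint32_t mask = aarch32_cpsr_valid_mask(env->features,
                                            &env_archcpu(env)->isar);
    cpsr_write(env, spsr, mask, CPSRWriteExceptionReturn);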
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index fb21ab9e73..3bae9e4a66 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -549,6 +549,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
ARM64_SYS_REG(3, 0, 0, 7, 0));
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
ARM64_SYS_REG(3, 0, 0, 7, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
+ ARM64_SYS_REG(3, 0, 0, 7, 2));
/*
* Note that if AArch32 support is not present in the host,
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 27d16ad9ad..af3020b78f 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -387,7 +387,14 @@ void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
- return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
+ /*
+ * We store the ARMv8 PSTATE.SS bit in env->uncached_cpsr.
+ * This is convenient for populating SPSR_ELx, but must be
+ * hidden from aarch32 mode, where it is not visible.
+ *
+ * TODO: ARMv8.4-DIT -- need to move SS somewhere else.
+ */
+ return cpsr_read(env) & ~(CPSR_EXEC | PSTATE_SS);
}
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
@@ -400,11 +407,14 @@ void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
+ uint32_t mask;
+
qemu_mutex_lock_iothread();
arm_call_pre_el_change_hook(env_archcpu(env));
qemu_mutex_unlock_iothread();
- cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
+ mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
+ cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
/* Generated code has already stored the new PC value, but
* without masking out its low bits, because which bits need
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 6e82486884..7c26c3bfeb 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -124,12 +124,15 @@ static int get_a64_user_mem_index(DisasContext *s)
*/
switch (useridx) {
case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
useridx = ARMMMUIdx_E10_0;
break;
case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
useridx = ARMMMUIdx_E20_0;
break;
case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
useridx = ARMMMUIdx_SE10_0;
break;
default:
@@ -1599,6 +1602,34 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
s->base.is_jmp = DISAS_NEXT;
break;
+ case 0x03: /* UAO */
+ if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
+ goto do_unallocated;
+ }
+ if (crm & 1) {
+ set_pstate_bits(PSTATE_UAO);
+ } else {
+ clear_pstate_bits(PSTATE_UAO);
+ }
+ t1 = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_a64(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ break;
+
+ case 0x04: /* PAN */
+ if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
+ goto do_unallocated;
+ }
+ if (crm & 1) {
+ set_pstate_bits(PSTATE_PAN);
+ } else {
+ clear_pstate_bits(PSTATE_PAN);
+ }
+ t1 = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_a64(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ break;
+
case 0x05: /* SPSel */
if (s->current_el == 0) {
goto do_unallocated;
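These are the MSR (immediate) forms MSR UAO, #imm and MSR PAN, #imm; CRm bit 0 carries the value to write. The generated TCG is equivalent to the runtime sketch below (hflags must be rebuilt because the effective mmu_idx may switch to or from a _PAN variant):

    /* Runtime effect of the translation above (sketch). */
    if (crm & 1) {
        env->pstate |= PSTATE_PAN;
    } else {
        env->pstate &= ~PSTATE_PAN;
    }
    arm_rebuild_hflags(env);    /* recompute cached TB flags */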
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e11a5871d0..20f89ace2f 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -155,10 +155,12 @@ static inline int get_a32_user_mem_index(DisasContext *s)
case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
case ARMMMUIdx_SE3:
case ARMMMUIdx_SE10_0:
case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
case ARMMMUIdx_MUser:
case ARMMMUIdx_MPriv:
@@ -2732,39 +2734,33 @@ static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
/* Return the mask of PSR bits set by a MSR instruction. */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
- uint32_t mask;
+ uint32_t mask = 0;
- mask = 0;
- if (flags & (1 << 0))
+ if (flags & (1 << 0)) {
mask |= 0xff;
- if (flags & (1 << 1))
- mask |= 0xff00;
- if (flags & (1 << 2))
- mask |= 0xff0000;
- if (flags & (1 << 3))
- mask |= 0xff000000;
-
- /* Mask out undefined bits. */
- mask &= ~CPSR_RESERVED;
- if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
- mask &= ~CPSR_T;
}
- if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
- mask &= ~CPSR_Q; /* V5TE in reality*/
+ if (flags & (1 << 1)) {
+ mask |= 0xff00;
}
- if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
- mask &= ~(CPSR_E | CPSR_GE);
+ if (flags & (1 << 2)) {
+ mask |= 0xff0000;
}
- if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
- mask &= ~CPSR_IT;
+ if (flags & (1 << 3)) {
+ mask |= 0xff000000;
}
- /* Mask out execution state and reserved bits. */
+
+ /* Mask out undefined and reserved bits. */
+ mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
+
+ /* Mask out execution state. */
if (!spsr) {
- mask &= ~(CPSR_EXEC | CPSR_RESERVED);
+ mask &= ~CPSR_EXEC;
}
+
/* Mask out privileged bits. */
- if (IS_USER(s))
+ if (IS_USER(s)) {
mask &= CPSR_USER;
+ }
return mask;
}
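A worked example of the rewritten function, assuming a v7-A guest (V4T, V5, V6, Thumb-2, no Jazelle, no PAN) executing a privileged MSR CPSR_fsxc, r0, so flags == 0xf (features and isar stand in for s->features and s->isar):

    uint32_t mask = 0xffffffff;                      /* all four byte lanes */
    mask &= aarch32_cpsr_valid_mask(features, isar); /* drop RES0, e.g. PAN */
    mask &= ~CPSR_EXEC;                              /* CPSR write: no T/IT/J/IL */
    /* IS_USER(s) would further restrict the result to CPSR_USER. */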