| author | David Gibson <david@gibson.dropbear.id.au> | 2020-01-03 15:27:24 +1100 |
|---|---|---|
| committer | David Gibson <david@gibson.dropbear.id.au> | 2020-03-17 09:41:15 +1100 |
| commit | 682c1dfb86acb19f542d06375b97c604ee000730 (patch) | |
| tree | d6ca7806aebde01b8abf7eef60f2d9b600ecc9d7 /target | |
| parent | 19acd4b610a2f6a8db2840faecaab813089a33b2 (diff) | |
target/ppc: Correct handling of real mode accesses with vhyp on hash MMU
On ppc we have the concept of virtual hypervisor ("vhyp") mode, where we
only model the non-hypervisor-privileged parts of the cpu. Essentially we
model the hypervisor's behaviour from the point of view of a guest OS, but
we don't model the hypervisor's execution.
In particular, in this mode, qemu's notion of target physical address is
a guest physical address from the vcpu's point of view. So accesses in
guest real mode don't require translation. If we were modelling the
hypervisor mode, we'd need to translate the guest physical address into
a host physical address.
Currently, we handle this sloppily: we rely on setting up the virtual LPCR
and RMOR registers so that GPAs are simply HPAs plus an offset, which we
set to zero. This is already conceptually dubious, since the LPCR and RMOR
registers don't exist in the non-hypervisor portion of the CPU. It gets
worse with POWER9, where RMOR and LPCR[VPM0] no longer exist at all.
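
A minimal sketch of why that trick worked, with assumed values (the RMLS size and the guest address are chosen purely for illustration):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Old approach, modelled standalone: virtual RMOR pinned to zero
     * and RMLS sized so the bounds check passes for RMA accesses. */
    uint64_t rmor = 0;               /* offset forced to zero */
    uint64_t rmls = 1ULL << 30;      /* assumed real mode area size */
    uint64_t gpa  = 0x2000;          /* assumed guest real mode access */

    assert(gpa < rmls);              /* bounds check against RMLS */
    assert((gpa | rmor) == gpa);     /* "HPA" is just GPA plus zero */
    return 0;
}
```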
Clean this up by explicitly handling the vhyp case. While we're there,
remove some unnecessary nesting of if statements that made the logic to
select the correct real mode behaviour a bit less clear than it could be.
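
Condensed from the diff below (bodies elided here), the reworked real mode dispatch becomes a single flat if/else-if chain:

```c
if (cpu->vhyp) {
    /* EA == GPA == qemu guest address: nothing to do */
} else if (msr_hv || !env->has_hv_mode) {
    /* Hypervisor real mode: apply HRMOR if the top EA bit is clear */
} else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
    /* Emulated VRMA mode */
} else {
    /* Emulated old-style RMO mode, bounds-checked against RMLS */
}
```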
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
Diffstat (limited to 'target')
| -rw-r--r-- | target/ppc/mmu-hash64.c | 60 |
|---|---|---|

1 file changed, 35 insertions, 25 deletions
```diff
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 3e0be4d55f..392f90e0ae 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -789,27 +789,30 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
          */
         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
 
-        /* In HV mode, add HRMOR if top EA bit is clear */
-        if (msr_hv || !env->has_hv_mode) {
+        if (cpu->vhyp) {
+            /*
+             * In virtual hypervisor mode, there's nothing to do:
+             *   EA == GPA == qemu guest address
+             */
+        } else if (msr_hv || !env->has_hv_mode) {
+            /* In HV mode, add HRMOR if top EA bit is clear */
             if (!(eaddr >> 63)) {
                 raddr |= env->spr[SPR_HRMOR];
             }
-        } else {
-            /* Otherwise, check VPM for RMA vs VRMA */
-            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
-                slb = &env->vrma_slb;
-                if (slb->sps) {
-                    goto skip_slb_search;
-                }
-                /* Not much else to do here */
+        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+            /* Emulated VRMA mode */
+            slb = &env->vrma_slb;
+            if (!slb->sps) {
+                /* Invalid VRMA setup, machine check */
                 cs->exception_index = POWERPC_EXCP_MCHECK;
                 env->error_code = 0;
                 return 1;
-            } else if (raddr < env->rmls) {
-                /* RMA. Check bounds in RMLS */
-                raddr |= env->spr[SPR_RMOR];
-            } else {
-                /* The access failed, generate the approriate interrupt */
+            }
+
+            goto skip_slb_search;
+        } else {
+            /* Emulated old-style RMO mode, bounds check against RMLS */
+            if (raddr >= env->rmls) {
                 if (rwx == 2) {
                     ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
                 } else {
@@ -821,6 +824,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                 }
                 return 1;
             }
+
+            raddr |= env->spr[SPR_RMOR];
         }
         tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
@@ -953,22 +958,27 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
         /* In real mode the top 4 effective address bits are ignored */
         raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
 
-        /* In HV mode, add HRMOR if top EA bit is clear */
-        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+        if (cpu->vhyp) {
+            /*
+             * In virtual hypervisor mode, there's nothing to do:
+             *   EA == GPA == qemu guest address
+             */
+            return raddr;
+        } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+            /* In HV mode, add HRMOR if top EA bit is clear */
             return raddr | env->spr[SPR_HRMOR];
-        }
-
-        /* Otherwise, check VPM for RMA vs VRMA */
-        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+            /* Emulated VRMA mode */
             slb = &env->vrma_slb;
             if (!slb->sps) {
                 return -1;
             }
-        } else if (raddr < env->rmls) {
-            /* RMA. Check bounds in RMLS */
-            return raddr | env->spr[SPR_RMOR];
         } else {
-            return -1;
+            /* Emulated old-style RMO mode, bounds check against RMLS */
+            if (raddr >= env->rmls) {
+                return -1;
+            }
+            return raddr | env->spr[SPR_RMOR];
         }
     } else {
         slb = slb_lookup(cpu, addr);
```