mirror of https://github.com/xemu-project/xemu.git
target/riscv: Move leaf pte processing out of level loop
Move the code that never loops outside of the loop.
Unchain the if-return-else statements.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Tested-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-Id: <20230325105429.1142530-21-richard.henderson@linaro.org>
Message-Id: <20230412114333.118895-21-richard.henderson@linaro.org>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
parent 8d6a00cdc0
commit 59688aa023
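For context, here is a minimal, self-contained C sketch of the restructuring pattern the commit message describes; it is not the actual QEMU code. The leaf handling that never loops is hoisted out of the level loop behind a goto label, and the chained if-return-else checks become independent early-return ifs. The names (walk_entry, translate, F_VALID, F_LEAF, F_BAD) and the flag layout are illustrative assumptions only.

#include <stdio.h>

enum { OK = 0, FAIL = -1 };
enum { LEVELS = 3 };

/* Illustrative flags, loosely modeled on PTE_V and the leaf R/W/X bits. */
#define F_VALID  (1u << 0)
#define F_LEAF   (1u << 1)
#define F_BAD    (1u << 2)

/* Stand-in for reading one page-table entry during the walk (hypothetical). */
static unsigned walk_entry(const unsigned *table, int level)
{
    return table[level];
}

/*
 * After the restructuring: the loop only handles inner entries; as soon as
 * a leaf is found we jump past the loop, and the leaf checks are written as
 * unchained early-return ifs instead of one long if-else-if ladder.
 */
static int translate(const unsigned *table)
{
    unsigned entry = 0;
    int level;

    for (level = 0; level < LEVELS; level++) {
        entry = walk_entry(table, level);
        if (!(entry & F_VALID)) {
            return FAIL;
        }
        if (entry & F_LEAF) {
            goto leaf;          /* leaf processing never loops, so leave */
        }
        /* inner entry: continue walking */
    }
    /* no leaf entry at any level */
    return FAIL;

 leaf:
    if (entry & F_BAD) {        /* each check returns on its own */
        return FAIL;
    }
    printf("leaf found at level %d\n", level);
    return OK;
}

int main(void)
{
    const unsigned table[LEVELS] = { F_VALID, F_VALID | F_LEAF, 0 };
    return translate(table) == OK ? 0 : 1;
}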
target/riscv/cpu_helper.c

@@ -865,6 +865,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
     }
 
     int ptshift = (levels - 1) * ptidxbits;
+    target_ulong pte;
+    hwaddr pte_addr;
     int i;
 
 #if !TCG_OVERSIZED_GUEST
@@ -881,7 +883,6 @@ restart:
         }
 
         /* check that physical address of PTE is legal */
-        hwaddr pte_addr;
 
         if (two_stage && first_stage) {
             int vbase_prot;
@@ -913,7 +914,6 @@ restart:
             return TRANSLATE_PMP_FAIL;
         }
 
-        target_ulong pte;
         if (riscv_cpu_mxl(env) == MXL_RV32) {
             pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
         } else {
@@ -938,128 +938,140 @@ restart:
         if (!(pte & PTE_V)) {
             /* Invalid PTE */
             return TRANSLATE_FAIL;
-        } else if (!pbmte && (pte & PTE_PBMT)) {
+        }
+        if (pte & (PTE_R | PTE_W | PTE_X)) {
+            goto leaf;
+        }
+
+        /* Inner PTE, continue walking */
+        if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
             return TRANSLATE_FAIL;
-        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
-            /* Inner PTE, continue walking */
-            if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
-                return TRANSLATE_FAIL;
-            }
-            base = ppn << PGSHIFT;
-        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
-            /* Reserved leaf PTE flags: PTE_W */
-            return TRANSLATE_FAIL;
-        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
-            /* Reserved leaf PTE flags: PTE_W + PTE_X */
-            return TRANSLATE_FAIL;
-        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
-                   (!sum || access_type == MMU_INST_FETCH))) {
-            /* User PTE flags when not U mode and mstatus.SUM is not set,
-               or the access type is an instruction fetch */
-            return TRANSLATE_FAIL;
-        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
-            /* Supervisor PTE flags when not S mode */
-            return TRANSLATE_FAIL;
-        } else if (ppn & ((1ULL << ptshift) - 1)) {
-            /* Misaligned PPN */
-            return TRANSLATE_FAIL;
-        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
-                   ((pte & PTE_X) && mxr))) {
-            /* Read access check failed */
-            return TRANSLATE_FAIL;
-        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
-            /* Write access check failed */
-            return TRANSLATE_FAIL;
-        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
-            /* Fetch access check failed */
-            return TRANSLATE_FAIL;
-        } else {
-            /* if necessary, set accessed and dirty bits. */
-            target_ulong updated_pte = pte | PTE_A |
+        }
+        base = ppn << PGSHIFT;
+    }
+
+    /* No leaf pte at any translation level. */
+    return TRANSLATE_FAIL;
+
+ leaf:
+    if (ppn & ((1ULL << ptshift) - 1)) {
+        /* Misaligned PPN */
+        return TRANSLATE_FAIL;
+    }
+    if (!pbmte && (pte & PTE_PBMT)) {
+        /* Reserved without Svpbmt. */
+        return TRANSLATE_FAIL;
+    }
+    if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
+        /* Reserved leaf PTE flags: PTE_W */
+        return TRANSLATE_FAIL;
+    }
+    if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
+        /* Reserved leaf PTE flags: PTE_W + PTE_X */
+        return TRANSLATE_FAIL;
+    }
+    if ((pte & PTE_U) &&
+        ((mode != PRV_U) && (!sum || access_type == MMU_INST_FETCH))) {
+        /*
+         * User PTE flags when not U mode and mstatus.SUM is not set,
+         * or the access type is an instruction fetch.
+         */
+        return TRANSLATE_FAIL;
+    }
+    if (!(pte & PTE_U) && (mode != PRV_S)) {
+        /* Supervisor PTE flags when not S mode */
+        return TRANSLATE_FAIL;
+    }
+    if (access_type == MMU_DATA_LOAD &&
+        !((pte & PTE_R) || ((pte & PTE_X) && mxr))) {
+        /* Read access check failed */
+        return TRANSLATE_FAIL;
+    }
+    if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
+        /* Write access check failed */
+        return TRANSLATE_FAIL;
+    }
+    if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
+        /* Fetch access check failed */
+        return TRANSLATE_FAIL;
+    }
+
+    /* If necessary, set accessed and dirty bits. */
+    target_ulong updated_pte = pte | PTE_A |
                 (access_type == MMU_DATA_STORE ? PTE_D : 0);
 
-            /* Page table updates need to be atomic with MTTCG enabled */
-            if (updated_pte != pte) {
-                if (!hade) {
-                    return TRANSLATE_FAIL;
-                }
-
-                /*
-                 * - if accessed or dirty bits need updating, and the PTE is
-                 *   in RAM, then we do so atomically with a compare and swap.
-                 * - if the PTE is in IO space or ROM, then it can't be updated
-                 *   and we return TRANSLATE_FAIL.
-                 * - if the PTE changed by the time we went to update it, then
-                 *   it is no longer valid and we must re-walk the page table.
-                 */
-                MemoryRegion *mr;
-                hwaddr l = sizeof(target_ulong), addr1;
-                mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
-                                             false, MEMTXATTRS_UNSPECIFIED);
-                if (memory_region_is_ram(mr)) {
-                    target_ulong *pte_pa =
-                        qemu_map_ram_ptr(mr->ram_block, addr1);
-#if TCG_OVERSIZED_GUEST
-                    /*
-                     * MTTCG is not enabled on oversized TCG guests so
-                     * page table updates do not need to be atomic
-                     */
-                    *pte_pa = pte = updated_pte;
-#else
-                    target_ulong old_pte =
-                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
-                    if (old_pte != pte) {
-                        goto restart;
-                    } else {
-                        pte = updated_pte;
-                    }
-#endif
-                } else {
-                    /*
-                     * misconfigured PTE in ROM (AD bits are not preset) or
-                     * PTE is in IO space and can't be updated atomically
-                     */
-                    return TRANSLATE_FAIL;
-                }
-            }
-
-            /*
-             * for superpage mappings, make a fake leaf PTE for the TLB's
-             * benefit.
-             */
-            target_ulong vpn = addr >> PGSHIFT;
-
-            if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
-                napot_bits = ctzl(ppn) + 1;
-                if ((i != (levels - 1)) || (napot_bits != 4)) {
-                    return TRANSLATE_FAIL;
-                }
-            }
-
-            napot_mask = (1 << napot_bits) - 1;
-            *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
-                          (vpn & (((target_ulong)1 << ptshift) - 1))
-                         ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
-
-            /* set permissions on the TLB entry */
-            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
-                *prot |= PAGE_READ;
-            }
-            if (pte & PTE_X) {
-                *prot |= PAGE_EXEC;
-            }
-            /*
-             * add write permission on stores or if the page is already dirty,
-             * so that we TLB miss on later writes to update the dirty bit
-             */
-            if ((pte & PTE_W) &&
-                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
-                *prot |= PAGE_WRITE;
-            }
-            return TRANSLATE_SUCCESS;
-        }
-    }
-    return TRANSLATE_FAIL;
+    /* Page table updates need to be atomic with MTTCG enabled */
+    if (updated_pte != pte) {
+        if (!hade) {
+            return TRANSLATE_FAIL;
+        }
+
+        /*
+         * - if accessed or dirty bits need updating, and the PTE is
+         *   in RAM, then we do so atomically with a compare and swap.
+         * - if the PTE is in IO space or ROM, then it can't be updated
+         *   and we return TRANSLATE_FAIL.
+         * - if the PTE changed by the time we went to update it, then
+         *   it is no longer valid and we must re-walk the page table.
+         */
+        MemoryRegion *mr;
+        hwaddr l = sizeof(target_ulong), addr1;
+        mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
+                                     false, MEMTXATTRS_UNSPECIFIED);
+        if (memory_region_is_ram(mr)) {
+            target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
+#if TCG_OVERSIZED_GUEST
+            /*
+             * MTTCG is not enabled on oversized TCG guests so
+             * page table updates do not need to be atomic
+             */
+            *pte_pa = pte = updated_pte;
+#else
+            target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
+            if (old_pte != pte) {
+                goto restart;
+            }
+            pte = updated_pte;
+#endif
+        } else {
+            /*
+             * Misconfigured PTE in ROM (AD bits are not preset) or
+             * PTE is in IO space and can't be updated atomically.
+             */
+            return TRANSLATE_FAIL;
+        }
+    }
+
+    /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
+    target_ulong vpn = addr >> PGSHIFT;
+
+    if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
+        napot_bits = ctzl(ppn) + 1;
+        if ((i != (levels - 1)) || (napot_bits != 4)) {
+            return TRANSLATE_FAIL;
+        }
+    }
+
+    napot_mask = (1 << napot_bits) - 1;
+    *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
+                  (vpn & (((target_ulong)1 << ptshift) - 1))
+                 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
+
+    /* set permissions on the TLB entry */
+    if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
+        *prot |= PAGE_READ;
+    }
+    if (pte & PTE_X) {
+        *prot |= PAGE_EXEC;
+    }
+    /*
+     * Add write permission on stores or if the page is already dirty,
+     * so that we TLB miss on later writes to update the dirty bit.
+     */
+    if ((pte & PTE_W) && (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
+        *prot |= PAGE_WRITE;
+    }
+    return TRANSLATE_SUCCESS;
 }
 
 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,