RISC-V: Improve page table walker spec compliance

- Inline PTE_TABLE check for better readability
- Change access checks from ternary operators to if statements
- Improve readability of User page U mode and SUM test
- Disallow non U mode from fetching from User pages
- Add reserved PTE flag check: W or W|X
- Add misaligned PPN check (both new checks are sketched after this list)
- Set READ protection for PTE X flag and mstatus.mxr
- Use memory_region_is_ram in pte update
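
The two new checks referenced above, the reserved R/W/X leaf encodings and the superpage PPN alignment rule, can be sketched as small standalone predicates. This is a minimal sketch for illustration only: the PTE_* values mirror the RISC-V privileged spec's PTE flag bits, but the helper names and the self-contained defines are assumptions, not QEMU code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* PTE flag bits, as laid out by the RISC-V privileged spec */
#define PTE_V 0x001
#define PTE_R 0x002
#define PTE_W 0x004
#define PTE_X 0x008
#define PTE_PPN_SHIFT 10

/* Hypothetical helper: true if a leaf PTE uses one of the two R/W/X
 * encodings the spec reserves: write-only (W) and write+execute (W|X) */
static bool pte_reserved_rwx(uint64_t pte)
{
    uint64_t rwx = pte & (PTE_R | PTE_W | PTE_X);
    return rwx == PTE_W || rwx == (PTE_W | PTE_X);
}

/* Hypothetical helper: true if a leaf PTE found above the last level is
 * misaligned, i.e. the PPN bits that would otherwise be supplied by the
 * remaining levels (the low `ptshift` bits) are not all zero */
static bool pte_misaligned_ppn(uint64_t pte, int ptshift)
{
    uint64_t ppn = pte >> PTE_PPN_SHIFT;
    return (ppn & ((1ULL << ptshift) - 1)) != 0;
}

int main(void)
{
    /* a write-only leaf is reserved; an Sv39 level-1 (2 MiB) leaf whose
     * low 9 PPN bits are set is misaligned */
    uint64_t write_only = PTE_V | PTE_W;
    uint64_t mega_leaf = PTE_V | PTE_R | PTE_X | (0x12345ULL << PTE_PPN_SHIFT);
    printf("reserved: %d, misaligned: %d\n",
           pte_reserved_rwx(write_only), pte_misaligned_ppn(mega_leaf, 9));
    return 0;
}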

Cc: Sagar Karandikar <sagark@eecs.berkeley.edu>
Cc: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Alistair Francis <Alistair.Francis@wdc.com>
Signed-off-by: Michael Clark <mjc@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
commit c3b03e5800 (parent 718a941e19)
Author: Michael Clark, 2018-03-05 09:27:28 +13:00 (committed by Alistair Francis)
2 changed files with 45 additions and 21 deletions


@@ -407,5 +407,3 @@
 #define PTE_SOFT 0x300 /* Reserved for Software */
 #define PTE_PPN_SHIFT 10
-
-#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)
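
The macro removed above treated a PTE as a pointer to the next table level when V was set and R, W, X were all clear. The walker below splits that one test into an explicit validity check followed by an inner-PTE check. A minimal sketch of the equivalence, with illustrative names and self-contained defines standing in for QEMU's headers:

#include <stdbool.h>
#include <stdint.h>

#define PTE_V 0x001
#define PTE_R 0x002
#define PTE_W 0x004
#define PTE_X 0x008

/* Old combined test: valid and no leaf permissions in one expression */
static bool pte_table_combined(uint64_t pte)
{
    return (pte & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V;
}

/* New split test: check validity first, then classify a PTE with no leaf
 * permissions as an inner (non-leaf) entry; same result for valid PTEs */
static bool pte_table_split(uint64_t pte)
{
    return (pte & PTE_V) && !(pte & (PTE_R | PTE_W | PTE_X));
}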


@@ -185,16 +185,39 @@ restart:
 #endif
         target_ulong ppn = pte >> PTE_PPN_SHIFT;
 
-        if (PTE_TABLE(pte)) { /* next level of page table */
+        if (!(pte & PTE_V)) {
+            /* Invalid PTE */
+            return TRANSLATE_FAIL;
+        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
+            /* Inner PTE, continue walking */
             base = ppn << PGSHIFT;
-        } else if ((pte & PTE_U) ? (mode == PRV_S) && !sum : !(mode == PRV_S)) {
-            break;
-        } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
-            break;
-        } else if (access_type == MMU_INST_FETCH ? !(pte & PTE_X) :
-                   access_type == MMU_DATA_LOAD ? !(pte & PTE_R) &&
-                   !(mxr && (pte & PTE_X)) : !((pte & PTE_R) && (pte & PTE_W))) {
-            break;
+        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
+            /* Reserved leaf PTE flags: PTE_W */
+            return TRANSLATE_FAIL;
+        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
+            /* Reserved leaf PTE flags: PTE_W + PTE_X */
+            return TRANSLATE_FAIL;
+        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
+                   (!sum || access_type == MMU_INST_FETCH))) {
+            /* User PTE flags when not U mode and mstatus.SUM is not set,
+               or the access type is an instruction fetch */
+            return TRANSLATE_FAIL;
+        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
+            /* Supervisor PTE flags when not S mode */
+            return TRANSLATE_FAIL;
+        } else if (ppn & ((1ULL << ptshift) - 1)) {
+            /* Misaligned PPN */
+            return TRANSLATE_FAIL;
+        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
+                   ((pte & PTE_X) && mxr))) {
+            /* Read access check failed */
+            return TRANSLATE_FAIL;
+        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
+            /* Write access check failed */
+            return TRANSLATE_FAIL;
+        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
+            /* Fetch access check failed */
+            return TRANSLATE_FAIL;
         } else {
             /* if necessary, set accessed and dirty bits. */
             target_ulong updated_pte = pte | PTE_A |
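
As a note on the U/SUM and MXR rules applied in the hunk above: user pages are reachable from S mode only for data accesses with mstatus.SUM set, never for instruction fetches, and mstatus.MXR lets loads succeed on execute-only pages. A standalone sketch of that decision for a single leaf PTE follows; the enum names and the predicate are illustrative, only the PTE bit values and the rules mirror the checks above.

#include <stdbool.h>
#include <stdint.h>

#define PTE_R 0x002
#define PTE_W 0x004
#define PTE_X 0x008
#define PTE_U 0x010

/* illustrative stand-ins for the walker's access_type and privilege mode */
typedef enum { ACC_LOAD, ACC_STORE, ACC_FETCH } acc_t;
typedef enum { MODE_U, MODE_S } priv_t;

/* Hypothetical predicate: would the checks above allow this access? */
static bool leaf_permits(uint64_t pte, acc_t acc, priv_t mode,
                         bool sum, bool mxr)
{
    if ((pte & PTE_U) && mode != MODE_U && (!sum || acc == ACC_FETCH)) {
        return false;  /* user page: S mode needs SUM, and may never fetch */
    }
    if (!(pte & PTE_U) && mode != MODE_S) {
        return false;  /* supervisor page: U mode may never touch it */
    }
    switch (acc) {
    case ACC_LOAD:
        return (pte & PTE_R) || ((pte & PTE_X) && mxr);
    case ACC_STORE:
        return (pte & PTE_W) != 0;
    case ACC_FETCH:
        return (pte & PTE_X) != 0;
    }
    return false;
}
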
@@ -202,16 +225,19 @@ restart:
             /* Page table updates need to be atomic with MTTCG enabled */
             if (updated_pte != pte) {
-                /* if accessed or dirty bits need updating, and the PTE is
-                 * in RAM, then we do so atomically with a compare and swap.
-                 * if the PTE is in IO space, then it can't be updated.
-                 * if the PTE changed, then we must re-walk the page table
-                   as the PTE is no longer valid */
+                /*
+                 * - if accessed or dirty bits need updating, and the PTE is
+                 *   in RAM, then we do so atomically with a compare and swap.
+                 * - if the PTE is in IO space or ROM, then it can't be updated
+                 *   and we return TRANSLATE_FAIL.
+                 * - if the PTE changed by the time we went to update it, then
+                 *   it is no longer valid and we must re-walk the page table.
+                 */
                 MemoryRegion *mr;
                 hwaddr l = sizeof(target_ulong), addr1;
                 mr = address_space_translate(cs->as, pte_addr,
                     &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
-                if (memory_access_is_direct(mr, true)) {
+                if (memory_region_is_ram(mr)) {
                     target_ulong *pte_pa =
                         qemu_map_ram_ptr(mr->ram_block, addr1);
 #if TCG_OVERSIZED_GUEST
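
The rewritten comment in the hunk above names three outcomes for the accessed/dirty update: a compare-and-swap when the PTE sits in RAM, a translation failure when it sits in IO space or ROM, and a page-table re-walk when the PTE changed underneath the walker. A minimal sketch of the compare-and-swap part, using C11 atomics instead of QEMU's internal helpers (the function name and types are assumptions made for this sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Try to publish the PTE with A (and possibly D) set. Returns false when
 * some other agent modified the PTE since we read it during the walk, in
 * which case the walk must restart because the value we based our checks
 * on is stale. */
static bool try_update_pte(_Atomic uint64_t *pte_pa, uint64_t old_pte,
                           uint64_t updated_pte)
{
    uint64_t expected = old_pte;
    return atomic_compare_exchange_strong(pte_pa, &expected, updated_pte);
}

A failed exchange corresponds to the re-walk case; the restart: label shown in the hunk headers is the retry target for that path.
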
@@ -239,15 +265,15 @@ restart:
         target_ulong vpn = addr >> PGSHIFT;
         *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
 
-        if ((pte & PTE_R)) {
+        /* set permissions on the TLB entry */
+        if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
             *prot |= PAGE_READ;
         }
         if ((pte & PTE_X)) {
             *prot |= PAGE_EXEC;
         }
-        /* only add write permission on stores or if the page
-           is already dirty, so that we don't miss further
-           page table walks to update the dirty bit */
+        /* add write permission on stores or if the page is already dirty,
+           so that we TLB miss on later writes to update the dirty bit */
         if ((pte & PTE_W) &&
                 (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
             *prot |= PAGE_WRITE;