mirror of https://github.com/xemu-project/xemu.git
target/arm: Move arm_{ldl,ldq}_ptw to ptw.c
Move the ptw load functions, plus 3 common subroutines:
S1_ptw_translate, ptw_attrs_are_device, and
regime_translation_big_endian. This also allows get_phys_addr_lpae
to become static again.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220604040607.269301-17-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
parent
3283222acd
commit
11552bb0d9
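The key point of the commit message is that once every caller of get_phys_addr_lpae lives in ptw.c, the function can drop its external linkage and its prototype can leave ptw.h. As a reminder of how that pattern works, here is a minimal standalone C sketch (hypothetical names, not QEMU code): a file-local helper is forward-declared static, the nonnull attribute stays on the declaration, and callers in the same translation unit need no header prototype.

/* linkage_sketch.c - hypothetical illustration only, not part of QEMU. */
#include <stdio.h>

/* Forward declaration: internal linkage, usable by definitions that appear
 * earlier in the same file.  The attribute mirrors the style of the
 * prototype this commit moves into ptw.c. */
static int translate_addr(const unsigned long *addr_in, unsigned long *addr_out)
    __attribute__((nonnull));

/* A caller in the same translation unit; no header prototype needed. */
static unsigned long walk_one_level(unsigned long addr)
{
    unsigned long out = 0;
    (void)translate_addr(&addr, &out);
    return out;
}

static int translate_addr(const unsigned long *addr_in, unsigned long *addr_out)
{
    *addr_out = *addr_in + 0x1000;   /* stand-in for a real translation step */
    return 0;
}

int main(void)
{
    printf("0x%lx\n", walk_one_level(0x4000));
    return 0;
}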
141 target/arm/helper.c
@@ -10475,12 +10475,6 @@ bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
 }
 
-static inline bool regime_translation_big_endian(CPUARMState *env,
-                                                 ARMMMUIdx mmu_idx)
-{
-    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
-}
-
 /* Return the TTBR associated with this translation regime */
 uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
 {
@@ -10773,141 +10767,6 @@ int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
     return prot_rw | PAGE_EXEC;
 }
 
-static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
-{
-    /*
-     * For an S1 page table walk, the stage 1 attributes are always
-     * some form of "this is Normal memory". The combined S1+S2
-     * attributes are therefore only Device if stage 2 specifies Device.
-     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
-     * ie when cacheattrs.attrs bits [3:2] are 0b00.
-     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
-     * when cacheattrs.attrs bit [2] is 0.
-     */
-    assert(cacheattrs.is_s2_format);
-    if (arm_hcr_el2_eff(env) & HCR_FWB) {
-        return (cacheattrs.attrs & 0x4) == 0;
-    } else {
-        return (cacheattrs.attrs & 0xc) == 0;
-    }
-}
-
-/* Translate a S1 pagetable walk through S2 if needed. */
-static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
-                               hwaddr addr, bool *is_secure,
-                               ARMMMUFaultInfo *fi)
-{
-    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
-        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
-        target_ulong s2size;
-        hwaddr s2pa;
-        int s2prot;
-        int ret;
-        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
-                                          : ARMMMUIdx_Stage2;
-        ARMCacheAttrs cacheattrs = {};
-        MemTxAttrs txattrs = {};
-
-        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
-                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
-                                 &cacheattrs);
-        if (ret) {
-            assert(fi->type != ARMFault_None);
-            fi->s2addr = addr;
-            fi->stage2 = true;
-            fi->s1ptw = true;
-            fi->s1ns = !*is_secure;
-            return ~0;
-        }
-        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
-            ptw_attrs_are_device(env, cacheattrs)) {
-            /*
-             * PTW set and S1 walk touched S2 Device memory:
-             * generate Permission fault.
-             */
-            fi->type = ARMFault_Permission;
-            fi->s2addr = addr;
-            fi->stage2 = true;
-            fi->s1ptw = true;
-            fi->s1ns = !*is_secure;
-            return ~0;
-        }
-
-        if (arm_is_secure_below_el3(env)) {
-            /* Check if page table walk is to secure or non-secure PA space. */
-            if (*is_secure) {
-                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
-            } else {
-                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
-            }
-        } else {
-            assert(!*is_secure);
-        }
-
-        addr = s2pa;
-    }
-    return addr;
-}
-
-/* All loads done in the course of a page table walk go through here. */
-uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
-                     ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    MemTxAttrs attrs = {};
-    MemTxResult result = MEMTX_OK;
-    AddressSpace *as;
-    uint32_t data;
-
-    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
-    attrs.secure = is_secure;
-    as = arm_addressspace(cs, attrs);
-    if (fi->s1ptw) {
-        return 0;
-    }
-    if (regime_translation_big_endian(env, mmu_idx)) {
-        data = address_space_ldl_be(as, addr, attrs, &result);
-    } else {
-        data = address_space_ldl_le(as, addr, attrs, &result);
-    }
-    if (result == MEMTX_OK) {
-        return data;
-    }
-    fi->type = ARMFault_SyncExternalOnWalk;
-    fi->ea = arm_extabort_type(result);
-    return 0;
-}
-
-uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
-                     ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    MemTxAttrs attrs = {};
-    MemTxResult result = MEMTX_OK;
-    AddressSpace *as;
-    uint64_t data;
-
-    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
-    attrs.secure = is_secure;
-    as = arm_addressspace(cs, attrs);
-    if (fi->s1ptw) {
-        return 0;
-    }
-    if (regime_translation_big_endian(env, mmu_idx)) {
-        data = address_space_ldq_be(as, addr, attrs, &result);
-    } else {
-        data = address_space_ldq_le(as, addr, attrs, &result);
-    }
-    if (result == MEMTX_OK) {
-        return data;
-    }
-    fi->type = ARMFault_SyncExternalOnWalk;
-    fi->ea = arm_extabort_type(result);
-    return 0;
-}
-
 /*
  * check_s2_mmu_setup
  * @cpu: ARMCPU
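The comment on ptw_attrs_are_device (removed from helper.c above and re-added to ptw.c below) describes two different Device checks depending on HCR_EL2.FWB. A standalone model of just that bit test, using hypothetical names and a simplified attrs value rather than QEMU types:

/* attrs_sketch.c - standalone model of the FWB-dependent Device test;
 * hypothetical code, not part of QEMU. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool attrs_are_device(bool fwb, unsigned attrs)
{
    if (fwb) {
        return (attrs & 0x4) == 0;   /* FWB == 1: Device when bit [2] is clear */
    } else {
        return (attrs & 0xc) == 0;   /* FWB == 0: Device when bits [3:2] are 0b00 */
    }
}

int main(void)
{
    assert(attrs_are_device(false, 0x0));    /* bits [3:2] == 0b00 -> Device */
    assert(!attrs_are_device(false, 0x4));   /* any of bits [3:2] set -> Normal */
    assert(attrs_are_device(true, 0x8));     /* bit [2] clear -> Device under FWB */
    assert(!attrs_are_device(true, 0x4));    /* bit [2] set -> Normal under FWB */
    printf("Device-attribute checks match the commented behaviour\n");
    return 0;
}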
160 target/arm/ptw.c
@@ -15,6 +15,154 @@
 #include "ptw.h"
 
 
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool s1_is_el0, hwaddr *phys_ptr,
+                               MemTxAttrs *txattrs, int *prot,
+                               target_ulong *page_size_ptr,
+                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+    __attribute__((nonnull));
+
+static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
+}
+
+static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
+{
+    /*
+     * For an S1 page table walk, the stage 1 attributes are always
+     * some form of "this is Normal memory". The combined S1+S2
+     * attributes are therefore only Device if stage 2 specifies Device.
+     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
+     * ie when cacheattrs.attrs bits [3:2] are 0b00.
+     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
+     * when cacheattrs.attrs bit [2] is 0.
+     */
+    assert(cacheattrs.is_s2_format);
+    if (arm_hcr_el2_eff(env) & HCR_FWB) {
+        return (cacheattrs.attrs & 0x4) == 0;
+    } else {
+        return (cacheattrs.attrs & 0xc) == 0;
+    }
+}
+
+/* Translate a S1 pagetable walk through S2 if needed. */
+static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
+                               hwaddr addr, bool *is_secure,
+                               ARMMMUFaultInfo *fi)
+{
+    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
+        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+        target_ulong s2size;
+        hwaddr s2pa;
+        int s2prot;
+        int ret;
+        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
+                                          : ARMMMUIdx_Stage2;
+        ARMCacheAttrs cacheattrs = {};
+        MemTxAttrs txattrs = {};
+
+        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
+                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
+                                 &cacheattrs);
+        if (ret) {
+            assert(fi->type != ARMFault_None);
+            fi->s2addr = addr;
+            fi->stage2 = true;
+            fi->s1ptw = true;
+            fi->s1ns = !*is_secure;
+            return ~0;
+        }
+        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
+            ptw_attrs_are_device(env, cacheattrs)) {
+            /*
+             * PTW set and S1 walk touched S2 Device memory:
+             * generate Permission fault.
+             */
+            fi->type = ARMFault_Permission;
+            fi->s2addr = addr;
+            fi->stage2 = true;
+            fi->s1ptw = true;
+            fi->s1ns = !*is_secure;
+            return ~0;
+        }
+
+        if (arm_is_secure_below_el3(env)) {
+            /* Check if page table walk is to secure or non-secure PA space. */
+            if (*is_secure) {
+                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+            } else {
+                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+            }
+        } else {
+            assert(!*is_secure);
+        }
+
+        addr = s2pa;
+    }
+    return addr;
+}
+
+/* All loads done in the course of a page table walk go through here. */
+static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult result = MEMTX_OK;
+    AddressSpace *as;
+    uint32_t data;
+
+    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+    attrs.secure = is_secure;
+    as = arm_addressspace(cs, attrs);
+    if (fi->s1ptw) {
+        return 0;
+    }
+    if (regime_translation_big_endian(env, mmu_idx)) {
+        data = address_space_ldl_be(as, addr, attrs, &result);
+    } else {
+        data = address_space_ldl_le(as, addr, attrs, &result);
+    }
+    if (result == MEMTX_OK) {
+        return data;
+    }
+    fi->type = ARMFault_SyncExternalOnWalk;
+    fi->ea = arm_extabort_type(result);
+    return 0;
+}
+
+static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult result = MEMTX_OK;
+    AddressSpace *as;
+    uint64_t data;
+
+    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+    attrs.secure = is_secure;
+    as = arm_addressspace(cs, attrs);
+    if (fi->s1ptw) {
+        return 0;
+    }
+    if (regime_translation_big_endian(env, mmu_idx)) {
+        data = address_space_ldq_be(as, addr, attrs, &result);
+    } else {
+        data = address_space_ldq_le(as, addr, attrs, &result);
+    }
+    if (result == MEMTX_OK) {
+        return data;
+    }
+    fi->type = ARMFault_SyncExternalOnWalk;
+    fi->ea = arm_extabort_type(result);
+    return 0;
+}
+
 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                      uint32_t *table, uint32_t address)
 {
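The arm_ldl_ptw and arm_ldq_ptw helpers added to ptw.c above select a big-endian or little-endian descriptor fetch based on regime_translation_big_endian, i.e. the regime's SCTLR.EE bit. A standalone sketch of that selection for a 32-bit descriptor (hypothetical helper, not QEMU code):

/* ptw_load_sketch.c - standalone model of the endianness choice made by
 * the page-table-walk loaders; hypothetical code, not part of QEMU. */
#include <stdint.h>
#include <stdio.h>

static uint32_t load_descriptor(const uint8_t bytes[4], int big_endian)
{
    if (big_endian) {   /* regime_translation_big_endian() returned true */
        return ((uint32_t)bytes[0] << 24) | ((uint32_t)bytes[1] << 16) |
               ((uint32_t)bytes[2] << 8)  |  (uint32_t)bytes[3];
    }
    return ((uint32_t)bytes[3] << 24) | ((uint32_t)bytes[2] << 16) |
           ((uint32_t)bytes[1] << 8)  |  (uint32_t)bytes[0];
}

int main(void)
{
    const uint8_t raw[4] = { 0x12, 0x34, 0x56, 0x78 };
    printf("big-endian view:    0x%08x\n", (unsigned)load_descriptor(raw, 1));
    printf("little-endian view: 0x%08x\n", (unsigned)load_descriptor(raw, 0));
    return 0;
}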
@@ -338,12 +486,12 @@ do_fault:
  * @fi: set to fault info if the translation fails
  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  */
-bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                        bool s1_is_el0,
-                        hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
-                        target_ulong *page_size_ptr,
-                        ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool s1_is_el0, hwaddr *phys_ptr,
+                               MemTxAttrs *txattrs, int *prot,
+                               target_ulong *page_size_ptr,
+                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 {
     ARMCPU *cpu = env_archcpu(env);
     CPUState *cs = CPU(cpu);
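For reference, the Secure/Non-secure PA-space decision inside S1_ptw_translate (added in the first ptw.c hunk above) keys off VSTCR_EL2.SW for walks from the Secure IPA space and VTCR_EL2.NSW for walks from the Non-secure IPA space. A standalone model of that decision, with hypothetical names and no QEMU types:

/* secure_space_sketch.c - standalone model of the PA-space selection;
 * hypothetical code, not part of QEMU. */
#include <stdbool.h>
#include <stdio.h>

/* Returns true when the table fetch should use the Secure PA space. */
static bool walk_to_secure_pa(bool ipa_is_secure, bool vstcr_sw, bool vtcr_nsw)
{
    if (ipa_is_secure) {
        return !vstcr_sw;   /* SW set sends Secure-IPA walks to the NS PA space */
    }
    return !vtcr_nsw;       /* NSW set sends NS-IPA walks to the NS PA space */
}

int main(void)
{
    printf("secure IPA, SW=0  -> %s PA space\n",
           walk_to_secure_pa(true, false, false) ? "Secure" : "Non-secure");
    printf("NS IPA,     NSW=1 -> %s PA space\n",
           walk_to_secure_pa(false, false, true) ? "Secure" : "Non-secure");
    return 0;
}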
13 target/arm/ptw.h
@@ -13,11 +13,6 @@
 
 extern const uint8_t pamax_map[7];
 
-uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
-                     ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi);
-uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
-                     ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi);
-
 bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx);
 bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx);
 uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn);
@@ -40,13 +35,5 @@ int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0);
 int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                int ap, int ns, int xn, int pxn);
 
-bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                        bool s1_is_el0,
-                        hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
-                        target_ulong *page_size_ptr,
-                        ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
-    __attribute__((nonnull));
-
 #endif /* !CONFIG_USER_ONLY */
 #endif /* TARGET_ARM_PTW_H */