target/arm: Move get_phys_addr to ptw.c
Begin moving all of the page table walking functions out of helper.c,
starting with get_phys_addr(). Create a temporary header file, "ptw.h",
in which to share declarations between the two C files while we are
moving functions. Move a few declarations to "internals.h", which will
remain used by multiple C files.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220604040607.269301-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 8ae0886002
parent d8cca960a9
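The refactoring pattern this commit uses is the standard way to split a large C file: the functions being moved lose their static qualifier, their prototypes go into a scratch header included by both translation units, and that header shrinks away as the move completes (only "internals.h" keeps the declarations that stay shared). A minimal sketch of the idea, with hypothetical file and function names that are not from this commit:

/* --- walk.h: temporary header shared only while the split is in flight --- */
#ifndef WALK_H
#define WALK_H
/* Formerly static in big.c; exported so both .c files can see it. */
int walk_one_level(unsigned long addr);
#endif

/* --- big.c: the file being shrunk --- */
#include "walk.h"
int translate(unsigned long addr)
{
    return walk_one_level(addr);   /* still callable across files */
}

/* --- walk.c: the function's new home --- */
#include "walk.h"
int walk_one_level(unsigned long addr)
{
    return (int)(addr >> 12);      /* placeholder body */
}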
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -37,22 +37,11 @@
 #include "semihosting/common-semi.h"
 #endif
 #include "cpregs.h"
+#include "ptw.h"
 
 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
 
-#ifndef CONFIG_USER_ONLY
-
-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool s1_is_el0,
-                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
-                               target_ulong *page_size_ptr,
-                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
-    __attribute__((nonnull));
-#endif
-
 static void switch_mode(CPUARMState *env, int mode);
-static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
 
 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
@@ -10440,17 +10429,10 @@ uint64_t arm_sctlr(CPUARMState *env, int el)
     return env->cp15.sctlr_el[el];
 }
 
-/* Return the SCTLR value which controls this address translation regime */
-static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
-    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
-}
-
 #ifndef CONFIG_USER_ONLY
 
 /* Return true if the specified stage of address translation is disabled */
-static inline bool regime_translation_disabled(CPUARMState *env,
-                                               ARMMMUIdx mmu_idx)
+bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     uint64_t hcr_el2;
 
@@ -10542,8 +10524,7 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
 #endif /* !CONFIG_USER_ONLY */
 
 /* Return true if the translation regime is using LPAE format page tables */
-static inline bool regime_using_lpae_format(CPUARMState *env,
-                                            ARMMMUIdx mmu_idx)
+bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     int el = regime_el(env, mmu_idx);
     if (el == 2 || arm_el_is_aa64(env, el)) {
@@ -10567,7 +10548,7 @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
 }
 
 #ifndef CONFIG_USER_ONLY
-static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
     case ARMMMUIdx_SE10_0:
@@ -10959,11 +10940,11 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
     return 0;
 }
 
-static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             hwaddr *phys_ptr, int *prot,
-                             target_ulong *page_size,
-                             ARMMMUFaultInfo *fi)
+bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
+                      MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                      hwaddr *phys_ptr, int *prot,
+                      target_ulong *page_size,
+                      ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
     int level = 1;
@@ -11081,10 +11062,10 @@ do_fault:
     return true;
 }
 
-static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                             target_ulong *page_size, ARMMMUFaultInfo *fi)
+bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
+                      MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                      hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                      target_ulong *page_size, ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
     ARMCPU *cpu = env_archcpu(env);
@@ -11360,7 +11341,7 @@ unsigned int arm_pamax(ARMCPU *cpu)
     return pamax_map[parange];
 }
 
-static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
+int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
 {
     if (regime_has_2_ranges(mmu_idx)) {
         return extract64(tcr, 37, 2);
@@ -11372,7 +11353,7 @@ static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
     }
 }
 
-static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
+int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
 {
     if (regime_has_2_ranges(mmu_idx)) {
         return extract64(tcr, 51, 2);
@@ -11602,12 +11583,12 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
  * @fi: set to fault info if the translation fails
  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  */
-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool s1_is_el0,
-                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
-                               target_ulong *page_size_ptr,
-                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                        bool s1_is_el0,
+                        hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+                        target_ulong *page_size_ptr,
+                        ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 {
     ARMCPU *cpu = env_archcpu(env);
     CPUState *cs = CPU(cpu);
@@ -12055,11 +12036,11 @@ static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
 }
 
-static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
-                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, int *prot,
-                                 target_ulong *page_size,
-                                 ARMMMUFaultInfo *fi)
+bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                          hwaddr *phys_ptr, int *prot,
+                          target_ulong *page_size,
+                          ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
     int n;
@@ -12501,11 +12482,11 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
 }
 
 
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
-                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                                 int *prot, target_ulong *page_size,
-                                 ARMMMUFaultInfo *fi)
+bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                          hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                          int *prot, target_ulong *page_size,
+                          ARMMMUFaultInfo *fi)
 {
     uint32_t secure = regime_is_secure(env, mmu_idx);
     V8M_SAttributes sattrs = {};
@@ -12575,10 +12556,10 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
     return ret;
 }
 
-static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
-                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, int *prot,
-                                 ARMMMUFaultInfo *fi)
+bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                          hwaddr *phys_ptr, int *prot,
+                          ARMMMUFaultInfo *fi)
 {
     int n;
     uint32_t mask;
@@ -12795,8 +12776,8 @@ static uint8_t combined_attrs_fwb(CPUARMState *env,
  * @s1: Attributes from stage 1 walk
  * @s2: Attributes from stage 2 walk
  */
-static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
-                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
+ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
+                                 ARMCacheAttrs s1, ARMCacheAttrs s2)
 {
     ARMCacheAttrs ret;
     bool tagged = false;
@@ -12848,256 +12829,6 @@ static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
     return ret;
 }
 
-
-/* get_phys_addr - get the physical address for this virtual address
- *
- * Find the physical address corresponding to the given virtual address,
- * by doing a translation table walk on MMU based systems or using the
- * MPU state on MPU based systems.
- *
- * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
- * prot and page_size may not be filled in, and the populated fsr value provides
- * information on why the translation aborted, in the format of a
- * DFSR/IFSR fault register, with the following caveats:
- *  * we honour the short vs long DFSR format differences.
- *  * the WnR bit is never set (the caller must do this).
- *  * for PSMAv5 based systems we don't bother to return a full FSR format
- *    value.
- *
- * @env: CPUARMState
- * @address: virtual address to get physical address for
- * @access_type: 0 for read, 1 for write, 2 for execute
- * @mmu_idx: MMU index indicating required translation regime
- * @phys_ptr: set to the physical address corresponding to the virtual address
- * @attrs: set to the memory transaction attributes to use
- * @prot: set to the permissions for the page containing phys_ptr
- * @page_size: set to the size of the page containing phys_ptr
- * @fi: set to fault info if the translation fails
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
- */
-bool get_phys_addr(CPUARMState *env, target_ulong address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                   target_ulong *page_size,
-                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
-{
-    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
-
-    if (mmu_idx != s1_mmu_idx) {
-        /* Call ourselves recursively to do the stage 1 and then stage 2
-         * translations if mmu_idx is a two-stage regime.
-         */
-        if (arm_feature(env, ARM_FEATURE_EL2)) {
-            hwaddr ipa;
-            int s2_prot;
-            int ret;
-            bool ipa_secure;
-            ARMCacheAttrs cacheattrs2 = {};
-            ARMMMUIdx s2_mmu_idx;
-            bool is_el0;
-
-            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
-                                attrs, prot, page_size, fi, cacheattrs);
-
-            /* If S1 fails or S2 is disabled, return early.  */
-            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
-                *phys_ptr = ipa;
-                return ret;
-            }
-
-            ipa_secure = attrs->secure;
-            if (arm_is_secure_below_el3(env)) {
-                if (ipa_secure) {
-                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
-                } else {
-                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
-                }
-            } else {
-                assert(!ipa_secure);
-            }
-
-            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
-            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
-
-            /* S1 is done. Now do S2 translation.  */
-            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
-                                     phys_ptr, attrs, &s2_prot,
-                                     page_size, fi, &cacheattrs2);
-            fi->s2addr = ipa;
-            /* Combine the S1 and S2 perms.  */
-            *prot &= s2_prot;
-
-            /* If S2 fails, return early.  */
-            if (ret) {
-                return ret;
-            }
-
-            /* Combine the S1 and S2 cache attributes. */
-            if (arm_hcr_el2_eff(env) & HCR_DC) {
-                /*
-                 * HCR.DC forces the first stage attributes to
-                 *  Normal Non-Shareable,
-                 *  Inner Write-Back Read-Allocate Write-Allocate,
-                 *  Outer Write-Back Read-Allocate Write-Allocate.
-                 * Do not overwrite Tagged within attrs.
-                 */
-                if (cacheattrs->attrs != 0xf0) {
-                    cacheattrs->attrs = 0xff;
-                }
-                cacheattrs->shareability = 0;
-            }
-            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
-
-            /* Check if IPA translates to secure or non-secure PA space. */
-            if (arm_is_secure_below_el3(env)) {
-                if (ipa_secure) {
-                    attrs->secure =
-                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
-                } else {
-                    attrs->secure =
-                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
-                        || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
-                }
-            }
-            return 0;
-        } else {
-            /*
-             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
-             */
-            mmu_idx = stage_1_mmu_idx(mmu_idx);
-        }
-    }
-
-    /* The page table entries may downgrade secure to non-secure, but
-     * cannot upgrade an non-secure translation regime's attributes
-     * to secure.
-     */
-    attrs->secure = regime_is_secure(env, mmu_idx);
-    attrs->user = regime_is_user(env, mmu_idx);
-
-    /* Fast Context Switch Extension. This doesn't exist at all in v8.
-     * In v7 and earlier it affects all stage 1 translations.
-     */
-    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
-        && !arm_feature(env, ARM_FEATURE_V8)) {
-        if (regime_el(env, mmu_idx) == 3) {
-            address += env->cp15.fcseidr_s;
-        } else {
-            address += env->cp15.fcseidr_ns;
-        }
-    }
-
-    if (arm_feature(env, ARM_FEATURE_PMSA)) {
-        bool ret;
-        *page_size = TARGET_PAGE_SIZE;
-
-        if (arm_feature(env, ARM_FEATURE_V8)) {
-            /* PMSAv8 */
-            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
-                                       phys_ptr, attrs, prot, page_size, fi);
-        } else if (arm_feature(env, ARM_FEATURE_V7)) {
-            /* PMSAv7 */
-            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
-                                       phys_ptr, prot, page_size, fi);
-        } else {
-            /* Pre-v7 MPU */
-            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
-                                       phys_ptr, prot, fi);
-        }
-        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
-                      " mmu_idx %u -> %s (prot %c%c%c)\n",
-                      access_type == MMU_DATA_LOAD ? "reading" :
-                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
-                      (uint32_t)address, mmu_idx,
-                      ret ? "Miss" : "Hit",
-                      *prot & PAGE_READ ? 'r' : '-',
-                      *prot & PAGE_WRITE ? 'w' : '-',
-                      *prot & PAGE_EXEC ? 'x' : '-');
-
-        return ret;
-    }
-
-    /* Definitely a real MMU, not an MPU */
-
-    if (regime_translation_disabled(env, mmu_idx)) {
-        uint64_t hcr;
-        uint8_t memattr;
-
-        /*
-         * MMU disabled.  S1 addresses within aa64 translation regimes are
-         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
-         */
-        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
-            int r_el = regime_el(env, mmu_idx);
-            if (arm_el_is_aa64(env, r_el)) {
-                int pamax = arm_pamax(env_archcpu(env));
-                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
-                int addrtop, tbi;
-
-                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
-                if (access_type == MMU_INST_FETCH) {
-                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
-                }
-                tbi = (tbi >> extract64(address, 55, 1)) & 1;
-                addrtop = (tbi ? 55 : 63);
-
-                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
-                    fi->type = ARMFault_AddressSize;
-                    fi->level = 0;
-                    fi->stage2 = false;
-                    return 1;
-                }
-
-                /*
-                 * When TBI is disabled, we've just validated that all of the
-                 * bits above PAMax are zero, so logically we only need to
-                 * clear the top byte for TBI.  But it's clearer to follow
-                 * the pseudocode set of addrdesc.paddress.
-                 */
-                address = extract64(address, 0, 52);
-            }
-        }
-        *phys_ptr = address;
-        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-        *page_size = TARGET_PAGE_SIZE;
-
-        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
-        hcr = arm_hcr_el2_eff(env);
-        cacheattrs->shareability = 0;
-        cacheattrs->is_s2_format = false;
-        if (hcr & HCR_DC) {
-            if (hcr & HCR_DCT) {
-                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
-            } else {
-                memattr = 0xff;  /* Normal, WB, RWA */
-            }
-        } else if (access_type == MMU_INST_FETCH) {
-            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
-                memattr = 0xee;  /* Normal, WT, RA, NT */
-            } else {
-                memattr = 0x44;  /* Normal, NC, No */
-            }
-            cacheattrs->shareability = 2; /* outer sharable */
-        } else {
-            memattr = 0x00;  /* Device, nGnRnE */
-        }
-        cacheattrs->attrs = memattr;
-        return 0;
-    }
-
-    if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
-                                  phys_ptr, attrs, prot, page_size,
-                                  fi, cacheattrs);
-    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
-        return get_phys_addr_v6(env, address, access_type, mmu_idx,
-                                phys_ptr, attrs, prot, page_size, fi);
-    } else {
-        return get_phys_addr_v5(env, address, access_type, mmu_idx,
-                                phys_ptr, prot, page_size, fi);
-    }
-}
-
 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                          MemTxAttrs *attrs)
 {
@@ -13121,7 +12852,6 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     }
     return phys_addr;
 }
-
 #endif
 
 /* Note that signed overflow is undefined in C. The following routines are
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -613,8 +613,13 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
 /* Return the MMU index for a v7M CPU in the specified security state */
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
 
-/* Return true if the stage 1 translation regime is using LPAE format page
- * tables */
+/* Return true if the translation regime is using LPAE format page tables */
+bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
+
+/*
+ * Return true if the stage 1 translation regime is using LPAE
+ * format page tables
+ */
 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
 
 /* Raise a data fault alignment exception for the specified virtual address */
@@ -777,6 +782,12 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     }
 }
 
+/* Return the SCTLR value which controls this address translation regime */
+static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+}
+
 /* Return the TCR controlling this translation regime */
 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
@@ -1095,6 +1106,9 @@ typedef struct ARMVAParameters {
 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                    ARMMMUIdx mmu_idx, bool data);
 
+int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
+int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
+
 static inline int exception_target_el(CPUARMState *env)
 {
     int target_el = MAX(1, arm_current_el(env));
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -58,6 +58,7 @@ arm_softmmu_ss.add(files(
   'machine.c',
   'monitor.c',
   'psci.c',
+  'ptw.c',
 ))
 
 subdir('hvf')
--- /dev/null
+++ b/target/arm/ptw.c
@@ -0,0 +1,267 @@
+/*
+ * ARM page table walking.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "internals.h"
+#include "ptw.h"
+
+
+/**
+ * get_phys_addr - get the physical address for this virtual address
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ *  * we honour the short vs long DFSR format differences.
+ *  * the WnR bit is never set (the caller must do this).
+ *  * for PSMAv5 based systems we don't bother to return a full FSR format
+ *    value.
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
+ */
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                   target_ulong *page_size,
+                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+{
+    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
+
+    if (mmu_idx != s1_mmu_idx) {
+        /*
+         * Call ourselves recursively to do the stage 1 and then stage 2
+         * translations if mmu_idx is a two-stage regime.
+         */
+        if (arm_feature(env, ARM_FEATURE_EL2)) {
+            hwaddr ipa;
+            int s2_prot;
+            int ret;
+            bool ipa_secure;
+            ARMCacheAttrs cacheattrs2 = {};
+            ARMMMUIdx s2_mmu_idx;
+            bool is_el0;
+
+            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
+                                attrs, prot, page_size, fi, cacheattrs);
+
+            /* If S1 fails or S2 is disabled, return early.  */
+            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+                *phys_ptr = ipa;
+                return ret;
+            }
+
+            ipa_secure = attrs->secure;
+            if (arm_is_secure_below_el3(env)) {
+                if (ipa_secure) {
+                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+                } else {
+                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+                }
+            } else {
+                assert(!ipa_secure);
+            }
+
+            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
+
+            /* S1 is done. Now do S2 translation.  */
+            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
+                                     phys_ptr, attrs, &s2_prot,
+                                     page_size, fi, &cacheattrs2);
+            fi->s2addr = ipa;
+            /* Combine the S1 and S2 perms.  */
+            *prot &= s2_prot;
+
+            /* If S2 fails, return early.  */
+            if (ret) {
+                return ret;
+            }
+
+            /* Combine the S1 and S2 cache attributes. */
+            if (arm_hcr_el2_eff(env) & HCR_DC) {
+                /*
+                 * HCR.DC forces the first stage attributes to
+                 *  Normal Non-Shareable,
+                 *  Inner Write-Back Read-Allocate Write-Allocate,
+                 *  Outer Write-Back Read-Allocate Write-Allocate.
+                 * Do not overwrite Tagged within attrs.
+                 */
+                if (cacheattrs->attrs != 0xf0) {
+                    cacheattrs->attrs = 0xff;
+                }
+                cacheattrs->shareability = 0;
+            }
+            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
+
+            /* Check if IPA translates to secure or non-secure PA space. */
+            if (arm_is_secure_below_el3(env)) {
+                if (ipa_secure) {
+                    attrs->secure =
+                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
+                } else {
+                    attrs->secure =
+                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
+                        || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
+                }
+            }
+            return 0;
+        } else {
+            /*
+             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+             */
+            mmu_idx = stage_1_mmu_idx(mmu_idx);
+        }
+    }
+
+    /*
+     * The page table entries may downgrade secure to non-secure, but
+     * cannot upgrade an non-secure translation regime's attributes
+     * to secure.
+     */
+    attrs->secure = regime_is_secure(env, mmu_idx);
+    attrs->user = regime_is_user(env, mmu_idx);
+
+    /*
+     * Fast Context Switch Extension. This doesn't exist at all in v8.
+     * In v7 and earlier it affects all stage 1 translations.
+     */
+    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
+        && !arm_feature(env, ARM_FEATURE_V8)) {
+        if (regime_el(env, mmu_idx) == 3) {
+            address += env->cp15.fcseidr_s;
+        } else {
+            address += env->cp15.fcseidr_ns;
+        }
+    }
+
+    if (arm_feature(env, ARM_FEATURE_PMSA)) {
+        bool ret;
+        *page_size = TARGET_PAGE_SIZE;
+
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            /* PMSAv8 */
+            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
+                                       phys_ptr, attrs, prot, page_size, fi);
+        } else if (arm_feature(env, ARM_FEATURE_V7)) {
+            /* PMSAv7 */
+            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+                                       phys_ptr, prot, page_size, fi);
+        } else {
+            /* Pre-v7 MPU */
+            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+                                       phys_ptr, prot, fi);
+        }
+        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
+                      " mmu_idx %u -> %s (prot %c%c%c)\n",
+                      access_type == MMU_DATA_LOAD ? "reading" :
+                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
+                      (uint32_t)address, mmu_idx,
+                      ret ? "Miss" : "Hit",
+                      *prot & PAGE_READ ? 'r' : '-',
+                      *prot & PAGE_WRITE ? 'w' : '-',
+                      *prot & PAGE_EXEC ? 'x' : '-');
+
+        return ret;
+    }
+
+    /* Definitely a real MMU, not an MPU */
+
+    if (regime_translation_disabled(env, mmu_idx)) {
+        uint64_t hcr;
+        uint8_t memattr;
+
+        /*
+         * MMU disabled.  S1 addresses within aa64 translation regimes are
+         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
+         */
+        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+            int r_el = regime_el(env, mmu_idx);
+            if (arm_el_is_aa64(env, r_el)) {
+                int pamax = arm_pamax(env_archcpu(env));
+                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+                int addrtop, tbi;
+
+                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+                if (access_type == MMU_INST_FETCH) {
+                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+                }
+                tbi = (tbi >> extract64(address, 55, 1)) & 1;
+                addrtop = (tbi ? 55 : 63);
+
+                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+                    fi->type = ARMFault_AddressSize;
+                    fi->level = 0;
+                    fi->stage2 = false;
+                    return 1;
+                }
+
+                /*
+                 * When TBI is disabled, we've just validated that all of the
+                 * bits above PAMax are zero, so logically we only need to
+                 * clear the top byte for TBI.  But it's clearer to follow
+                 * the pseudocode set of addrdesc.paddress.
+                 */
+                address = extract64(address, 0, 52);
+            }
+        }
+        *phys_ptr = address;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        *page_size = TARGET_PAGE_SIZE;
+
+        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+        hcr = arm_hcr_el2_eff(env);
+        cacheattrs->shareability = 0;
+        cacheattrs->is_s2_format = false;
+        if (hcr & HCR_DC) {
+            if (hcr & HCR_DCT) {
+                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
+            } else {
+                memattr = 0xff;  /* Normal, WB, RWA */
+            }
+        } else if (access_type == MMU_INST_FETCH) {
+            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+                memattr = 0xee;  /* Normal, WT, RA, NT */
+            } else {
+                memattr = 0x44;  /* Normal, NC, No */
+            }
+            cacheattrs->shareability = 2; /* outer sharable */
+        } else {
+            memattr = 0x00;  /* Device, nGnRnE */
+        }
+        cacheattrs->attrs = memattr;
+        return 0;
+    }
+
+    if (regime_using_lpae_format(env, mmu_idx)) {
+        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
+                                  phys_ptr, attrs, prot, page_size,
+                                  fi, cacheattrs);
+    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+        return get_phys_addr_v6(env, address, access_type, mmu_idx,
+                                phys_ptr, attrs, prot, page_size, fi);
+    } else {
+        return get_phys_addr_v5(env, address, access_type, mmu_idx,
+                                phys_ptr, prot, page_size, fi);
+    }
+}
--- /dev/null
+++ b/target/arm/ptw.h
@@ -0,0 +1,51 @@
+/*
+ * ARM page table walking.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TARGET_ARM_PTW_H
+#define TARGET_ARM_PTW_H
+
+#ifndef CONFIG_USER_ONLY
+
+bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx);
+bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx);
+ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
+                                 ARMCacheAttrs s1, ARMCacheAttrs s2);
+
+bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
+                      MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                      hwaddr *phys_ptr, int *prot,
+                      target_ulong *page_size,
+                      ARMMMUFaultInfo *fi);
+bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                          hwaddr *phys_ptr, int *prot,
+                          ARMMMUFaultInfo *fi);
+bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
+                      MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                      hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                      target_ulong *page_size, ARMMMUFaultInfo *fi);
+bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                          hwaddr *phys_ptr, int *prot,
+                          target_ulong *page_size,
+                          ARMMMUFaultInfo *fi);
+bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                          hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                          int *prot, target_ulong *page_size,
+                          ARMMMUFaultInfo *fi);
+bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                        bool s1_is_el0,
+                        hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+                        target_ulong *page_size_ptr,
+                        ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+    __attribute__((nonnull));
+
+#endif /* !CONFIG_USER_ONLY */
+#endif /* TARGET_ARM_PTW_H */
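For context on how this interface is consumed, the debug-access path in helper.c is the simplest caller of get_phys_addr(). The sketch below is modeled on arm_cpu_get_phys_page_attrs_debug() as it looked around this commit; treat it as an illustration of the calling convention (note that get_phys_addr() returns false on success) rather than the exact tree state:

/* Sketch of a get_phys_addr() caller, modeled on the debug-access path. */
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMCacheAttrs cacheattrs = {};

    *attrs = (MemTxAttrs) {};

    /* "true" from get_phys_addr() means the translation faulted. */
    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        return -1;
    }
    return phys_addr;
}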