mirror of https://github.com/xemu-project/xemu.git
target/arm: Split out get_phys_addr_disabled
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221001162318.153420-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent 2189c79858
commit 448e42fdc1
target/arm/ptw.c (138 changed lines)
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -2272,6 +2272,78 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
     return ret;
 }
 
+/*
+ * MMU disabled. S1 addresses within aa64 translation regimes are
+ * still checked for bounds -- see AArch64.S1DisabledOutput().
+ */
+static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
+                                   MMUAccessType access_type,
+                                   ARMMMUIdx mmu_idx, bool is_secure,
+                                   GetPhysAddrResult *result,
+                                   ARMMMUFaultInfo *fi)
+{
+    uint64_t hcr;
+    uint8_t memattr;
+
+    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+        int r_el = regime_el(env, mmu_idx);
+        if (arm_el_is_aa64(env, r_el)) {
+            int pamax = arm_pamax(env_archcpu(env));
+            uint64_t tcr = env->cp15.tcr_el[r_el];
+            int addrtop, tbi;
+
+            tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+            if (access_type == MMU_INST_FETCH) {
+                tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+            }
+            tbi = (tbi >> extract64(address, 55, 1)) & 1;
+            addrtop = (tbi ? 55 : 63);
+
+            if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+                fi->type = ARMFault_AddressSize;
+                fi->level = 0;
+                fi->stage2 = false;
+                return 1;
+            }
+
+            /*
+             * When TBI is disabled, we've just validated that all of the
+             * bits above PAMax are zero, so logically we only need to
+             * clear the top byte for TBI. But it's clearer to follow
+             * the pseudocode set of addrdesc.paddress.
+             */
+            address = extract64(address, 0, 52);
+        }
+    }
+
+    result->phys = address;
+    result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    result->page_size = TARGET_PAGE_SIZE;
+
+    /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+    result->cacheattrs.shareability = 0;
+    result->cacheattrs.is_s2_format = false;
+    if (hcr & HCR_DC) {
+        if (hcr & HCR_DCT) {
+            memattr = 0xf0; /* Tagged, Normal, WB, RWA */
+        } else {
+            memattr = 0xff; /* Normal, WB, RWA */
+        }
+    } else if (access_type == MMU_INST_FETCH) {
+        if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+            memattr = 0xee; /* Normal, WT, RA, NT */
+        } else {
+            memattr = 0x44; /* Normal, NC, No */
+        }
+        result->cacheattrs.shareability = 2; /* outer sharable */
+    } else {
+        memattr = 0x00; /* Device, nGnRnE */
+    }
+    result->cacheattrs.attrs = memattr;
+    return 0;
+}
+
 bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                bool is_secure, GetPhysAddrResult *result,
@@ -2431,71 +2503,9 @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
     /* Definitely a real MMU, not an MPU */
 
     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
-        uint64_t hcr;
-        uint8_t memattr;
-
-        /*
-         * MMU disabled. S1 addresses within aa64 translation regimes are
-         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
-         */
-        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
-            int r_el = regime_el(env, mmu_idx);
-            if (arm_el_is_aa64(env, r_el)) {
-                int pamax = arm_pamax(env_archcpu(env));
-                uint64_t tcr = env->cp15.tcr_el[r_el];
-                int addrtop, tbi;
-
-                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
-                if (access_type == MMU_INST_FETCH) {
-                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
-                }
-                tbi = (tbi >> extract64(address, 55, 1)) & 1;
-                addrtop = (tbi ? 55 : 63);
-
-                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
-                    fi->type = ARMFault_AddressSize;
-                    fi->level = 0;
-                    fi->stage2 = false;
-                    return 1;
-                }
-
-                /*
-                 * When TBI is disabled, we've just validated that all of the
-                 * bits above PAMax are zero, so logically we only need to
-                 * clear the top byte for TBI. But it's clearer to follow
-                 * the pseudocode set of addrdesc.paddress.
-                 */
-                address = extract64(address, 0, 52);
-            }
-        }
-        result->phys = address;
-        result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-        result->page_size = TARGET_PAGE_SIZE;
-
-        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
-        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
-        result->cacheattrs.shareability = 0;
-        result->cacheattrs.is_s2_format = false;
-        if (hcr & HCR_DC) {
-            if (hcr & HCR_DCT) {
-                memattr = 0xf0; /* Tagged, Normal, WB, RWA */
-            } else {
-                memattr = 0xff; /* Normal, WB, RWA */
-            }
-        } else if (access_type == MMU_INST_FETCH) {
-            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
-                memattr = 0xee; /* Normal, WT, RA, NT */
-            } else {
-                memattr = 0x44; /* Normal, NC, No */
-            }
-            result->cacheattrs.shareability = 2; /* outer sharable */
-        } else {
-            memattr = 0x00; /* Device, nGnRnE */
-        }
-        result->cacheattrs.attrs = memattr;
-        return 0;
+        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
+                                      is_secure, result, fi);
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                   is_secure, false, result, fi);
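The bounds check that the new helper performs follows AArch64.S1DisabledOutput(): with the stage 1 MMU disabled, an aa64 address must still have zero bits above PAMax, except that when TBI applies the top byte (bits [63:56]) is ignored. Below is a minimal standalone sketch of just that check, not QEMU code; the local extract64() helper, the s1_disabled_address_fault() name, and the PAMax value of 48 are assumptions made for illustration.

/* Standalone illustration of the S1-disabled address bounds check (not QEMU code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same semantics as QEMU's extract64(): return 'length' bits starting at 'start'. */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

/*
 * With TBI, only bits [55:PAMax] must be zero; without TBI, every bit above
 * PAMax must be zero, otherwise an ARMFault_AddressSize fault is reported.
 */
static bool s1_disabled_address_fault(uint64_t address, int pamax, bool tbi)
{
    int addrtop = tbi ? 55 : 63;
    return extract64(address, pamax, addrtop - pamax + 1) != 0;
}

int main(void)
{
    uint64_t tagged = 0xAA00000000001000ULL;  /* tag 0xAA in bits [63:56] */

    /* PAMax = 48 is an assumed example value. */
    printf("TBI on:  fault=%d\n", s1_disabled_address_fault(tagged, 48, true));  /* 0 */
    printf("TBI off: fault=%d\n", s1_disabled_address_fault(tagged, 48, false)); /* 1 */
    return 0;
}

Beyond this check, get_phys_addr_disabled also truncates the address to its low 52 bits and fills in the flat-mapping protection, page size, and cache attributes, exactly as shown in the first hunk above.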