target/arm: Split out get_phys_addr_twostage
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent f3639a64f6
commit 3f5a74c543

target/arm/ptw.c | 191 lines changed
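
The refactor is mechanical: the inline EL2 two-stage walk is moved out of get_phys_addr_with_struct into the new helper get_phys_addr_twostage, and the caller collapses to a short dispatch. Condensed from the diff below (not verbatim source), the resulting shape is:

    if (mmu_idx != s1_mmu_idx) {
        /* Two-stage regime: do stage 1 here, stage 2 in the helper. */
        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            return get_phys_addr_twostage(env, ptw, address, access_type,
                                          result, fi);
        }
        /* Without EL2, a stage1+stage2 translation is just stage 1. */
    }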
@@ -31,6 +31,13 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
+static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+                                      target_ulong address,
+                                      MMUAccessType access_type,
+                                      GetPhysAddrResult *result,
+                                      ARMMMUFaultInfo *fi)
+    __attribute__((nonnull));
+
 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
 static const uint8_t pamax_map[] = {
     [0] = 32,
@@ -2428,6 +2435,94 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
     return 0;
 }
 
+static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
+                                   target_ulong address,
+                                   MMUAccessType access_type,
+                                   GetPhysAddrResult *result,
+                                   ARMMMUFaultInfo *fi)
+{
+    hwaddr ipa;
+    int s1_prot;
+    int ret;
+    bool is_secure = ptw->in_secure;
+    bool ipa_secure, s2walk_secure;
+    ARMCacheAttrs cacheattrs1;
+    bool is_el0;
+    uint64_t hcr;
+
+    ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
+
+    /* If S1 fails or S2 is disabled, return early. */
+    if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
+        return ret;
+    }
+
+    ipa = result->f.phys_addr;
+    ipa_secure = result->f.attrs.secure;
+    if (is_secure) {
+        /* Select TCR based on the NS bit from the S1 walk. */
+        s2walk_secure = !(ipa_secure
+                          ? env->cp15.vstcr_el2 & VSTCR_SW
+                          : env->cp15.vtcr_el2 & VTCR_NSW);
+    } else {
+        assert(!ipa_secure);
+        s2walk_secure = false;
+    }
+
+    is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
+    ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+    ptw->in_secure = s2walk_secure;
+
+    /*
+     * S1 is done, now do S2 translation.
+     * Save the stage1 results so that we may merge prot and cacheattrs later.
+     */
+    s1_prot = result->f.prot;
+    cacheattrs1 = result->cacheattrs;
+    memset(result, 0, sizeof(*result));
+
+    ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
+    fi->s2addr = ipa;
+
+    /* Combine the S1 and S2 perms. */
+    result->f.prot &= s1_prot;
+
+    /* If S2 fails, return early. */
+    if (ret) {
+        return ret;
+    }
+
+    /* Combine the S1 and S2 cache attributes. */
+    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+    if (hcr & HCR_DC) {
+        /*
+         * HCR.DC forces the first stage attributes to
+         *  Normal Non-Shareable,
+         *  Inner Write-Back Read-Allocate Write-Allocate,
+         *  Outer Write-Back Read-Allocate Write-Allocate.
+         * Do not overwrite Tagged within attrs.
+         */
+        if (cacheattrs1.attrs != 0xf0) {
+            cacheattrs1.attrs = 0xff;
+        }
+        cacheattrs1.shareability = 0;
+    }
+    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
+                                            result->cacheattrs);
+
+    /*
+     * Check if IPA translates to secure or non-secure PA space.
+     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
+     */
+    result->f.attrs.secure =
+        (is_secure
+         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+         && (ipa_secure
+             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+
+    return 0;
+}
+
 static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
                                       target_ulong address,
                                       MMUAccessType access_type,
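
Two security-space decisions do the heavy lifting in the helper above: which space the stage-2 walk itself uses, and which PA space the IPA finally maps to. The following is a self-contained sketch of just those predicates, not QEMU source; the VSTCR/VTCR bit positions are assumed here for illustration only.

    /* Sketch only: bit positions are assumptions, not QEMU's definitions. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define VSTCR_SA (1ULL << 30)
    #define VSTCR_SW (1ULL << 29)
    #define VTCR_NSA (1ULL << 30)
    #define VTCR_NSW (1ULL << 29)

    /* Security space used by the stage-2 walk itself. */
    static bool s2walk_secure(bool is_secure, bool ipa_secure,
                              uint64_t vstcr, uint64_t vtcr)
    {
        if (!is_secure) {
            assert(!ipa_secure);  /* NS stage 1 cannot yield a secure IPA */
            return false;
        }
        /* Secure IPA: VSTCR.SW selects; non-secure IPA: VTCR.NSW selects. */
        return !(ipa_secure ? (vstcr & VSTCR_SW) : (vtcr & VTCR_NSW));
    }

    /* Final PA space: VSTCR overrides VTCR, and {N}SW overrides {N}SA. */
    static bool pa_secure(bool is_secure, bool ipa_secure,
                          uint64_t vstcr, uint64_t vtcr)
    {
        return is_secure
               && !(vstcr & (VSTCR_SA | VSTCR_SW))
               && (ipa_secure || !(vtcr & (VTCR_NSA | VTCR_NSW)));
    }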
@@ -2441,99 +2536,13 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
     if (mmu_idx != s1_mmu_idx) {
         /*
          * Call ourselves recursively to do the stage 1 and then stage 2
-         * translations if mmu_idx is a two-stage regime.
+         * translations if mmu_idx is a two-stage regime, and EL2 present.
+         * Otherwise, a stage1+stage2 translation is just stage 1.
          */
+        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
         if (arm_feature(env, ARM_FEATURE_EL2)) {
-            hwaddr ipa;
-            int s1_prot;
-            int ret;
-            bool ipa_secure, s2walk_secure;
-            ARMCacheAttrs cacheattrs1;
-            bool is_el0;
-            uint64_t hcr;
-
-            ptw->in_mmu_idx = s1_mmu_idx;
-            ret = get_phys_addr_with_struct(env, ptw, address, access_type,
-                                            result, fi);
-
-            /* If S1 fails or S2 is disabled, return early. */
-            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
-                                                   is_secure)) {
-                return ret;
-            }
-
-            ipa = result->f.phys_addr;
-            ipa_secure = result->f.attrs.secure;
-            if (is_secure) {
-                /* Select TCR based on the NS bit from the S1 walk. */
-                s2walk_secure = !(ipa_secure
-                                  ? env->cp15.vstcr_el2 & VSTCR_SW
-                                  : env->cp15.vtcr_el2 & VTCR_NSW);
-            } else {
-                assert(!ipa_secure);
-                s2walk_secure = false;
-            }
-
-            ptw->in_mmu_idx =
-                s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
-            ptw->in_secure = s2walk_secure;
-            is_el0 = mmu_idx == ARMMMUIdx_E10_0;
-
-            /*
-             * S1 is done, now do S2 translation.
-             * Save the stage1 results so that we may merge
-             * prot and cacheattrs later.
-             */
-            s1_prot = result->f.prot;
-            cacheattrs1 = result->cacheattrs;
-            memset(result, 0, sizeof(*result));
-
-            ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
-                                     is_el0, result, fi);
-            fi->s2addr = ipa;
-
-            /* Combine the S1 and S2 perms. */
-            result->f.prot &= s1_prot;
-
-            /* If S2 fails, return early. */
-            if (ret) {
-                return ret;
-            }
-
-            /* Combine the S1 and S2 cache attributes. */
-            hcr = arm_hcr_el2_eff_secstate(env, is_secure);
-            if (hcr & HCR_DC) {
-                /*
-                 * HCR.DC forces the first stage attributes to
-                 *  Normal Non-Shareable,
-                 *  Inner Write-Back Read-Allocate Write-Allocate,
-                 *  Outer Write-Back Read-Allocate Write-Allocate.
-                 * Do not overwrite Tagged within attrs.
-                 */
-                if (cacheattrs1.attrs != 0xf0) {
-                    cacheattrs1.attrs = 0xff;
-                }
-                cacheattrs1.shareability = 0;
-            }
-            result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
-                                                    result->cacheattrs);
-
-            /*
-             * Check if IPA translates to secure or non-secure PA space.
-             * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
-             */
-            result->f.attrs.secure =
-                (is_secure
-                 && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
-                 && (ipa_secure
-                     || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
-
-            return 0;
-        } else {
-            /*
-             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
-             */
-            mmu_idx = stage_1_mmu_idx(mmu_idx);
+            return get_phys_addr_twostage(env, ptw, address, access_type,
+                                          result, fi);
         }
     }
 
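
Two details of the split are easy to miss. First, the is_el0 test becomes ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0 instead of mmu_idx == ARMMMUIdx_E10_0; by the time the helper runs, in_mmu_idx already holds the stage-1 index, so the two tests are equivalent. Second, the HCR_EL2.DC forcing of stage-1 cache attributes is carried over verbatim; restated in isolation below (an illustrative sketch, not QEMU source; 0xf0 and 0xff are the Tagged and Normal write-back attribute encodings named in the diff's comment).

    #include <stdint.h>

    /* HCR_EL2.DC forces stage-1 attributes to Normal Non-Shareable,
     * Inner/Outer Write-Back Read-Allocate Write-Allocate (0xff),
     * but leaves Tagged memory (0xf0) alone. */
    static void hcr_dc_force_attrs(uint8_t *attrs, uint8_t *shareability)
    {
        if (*attrs != 0xf0) {   /* do not overwrite Tagged */
            *attrs = 0xff;      /* Normal, Inner/Outer WB RA WA */
        }
        *shareability = 0;      /* Non-Shareable */
    }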