mirror of https://github.com/xemu-project/xemu.git
target/arm: Always pass cacheattr to get_phys_addr
We need to check the memattr of a page in order to determine
whether it is Tagged for MTE. Between Stage1 and Stage2, this
becomes simpler if we always collect this data, instead of
occasionally being presented with NULL.

Use the nonnull attribute to allow the compiler to check that
all pointer arguments are non-null.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-42-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent 34669338bd
commit 7e98e21c09
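As context for the hunks below, here is a minimal, self-contained sketch of the two ideas the commit message describes: a __attribute__((nonnull)) declaration that lets the compiler flag literal NULL arguments, and a caller that always supplies a zero-initialized attributes struct instead of NULL. The CacheAttrs type and walk_page_table() function are invented for this illustration only; they are not QEMU's real API, where the same pattern is applied to get_phys_addr()/get_phys_addr_lpae() and their callers.

/*
 * Hedged sketch of the pattern this patch adopts; CacheAttrs and
 * walk_page_table() are stand-ins, not QEMU's actual types.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct CacheAttrs {
    uint8_t attrs;          /* MAIR-style memory attributes */
    uint8_t shareability;   /* SH[1:0] field */
} CacheAttrs;

/*
 * nonnull (with no argument list) covers every pointer parameter, so
 * the compiler can warn (-Wnonnull) if a caller passes a literal NULL
 * and the callee no longer needs "if (cacheattrs != NULL)" guards.
 */
static bool walk_page_table(uint64_t addr, uint64_t *phys,
                            CacheAttrs *cacheattrs)
    __attribute__((nonnull));

static bool walk_page_table(uint64_t addr, uint64_t *phys,
                            CacheAttrs *cacheattrs)
{
    /* Fake translation: identity map, Normal WB cacheable, Inner Shareable. */
    *phys = addr;
    cacheattrs->attrs = 0xff;
    cacheattrs->shareability = 3;
    return false;   /* false: no fault, mirroring get_phys_addr()'s convention */
}

int main(void)
{
    uint64_t phys;
    /* Callers that do not care about the result still pass a
     * zero-initialized struct rather than NULL. */
    CacheAttrs cacheattrs = { 0 };

    if (!walk_page_table(0x80000000u, &phys, &cacheattrs)) {
        printf("phys=0x%" PRIx64 " attrs=0x%02x sh=%u\n",
               phys, cacheattrs.attrs, cacheattrs.shareability);
    }
    return 0;
}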
target/arm/helper.c
@@ -44,7 +44,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                bool s1_is_el0,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr,
-                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+    __attribute__((nonnull));
 #endif
 
 static void switch_mode(CPUARMState *env, int mode);
@@ -11101,10 +11102,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
         arm_tlb_bti_gp(txattrs) = true;
     }
 
-    if (cacheattrs != NULL) {
     if (mmu_idx == ARMMMUIdx_Stage2) {
-            cacheattrs->attrs = convert_stage2_attrs(env,
-                                                     extract32(attrs, 0, 4));
+        cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
     } else {
         /* Index into MAIR registers for cache attributes */
         uint8_t attrindx = extract32(attrs, 0, 3);
@@ -11113,7 +11112,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
         cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
     }
     cacheattrs->shareability = extract32(attrs, 6, 2);
-    }
 
     *phys_ptr = descaddr;
     *page_size_ptr = page_size;
@@ -11948,14 +11946,17 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
             ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
                                      mmu_idx == ARMMMUIdx_E10_0,
                                      phys_ptr, attrs, &s2_prot,
-                                     page_size, fi,
-                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
+                                     page_size, fi, &cacheattrs2);
             fi->s2addr = ipa;
             /* Combine the S1 and S2 perms. */
             *prot &= s2_prot;
 
-            /* Combine the S1 and S2 cache attributes, if needed */
-            if (!ret && cacheattrs != NULL) {
+            /* If S2 fails, return early. */
+            if (ret) {
+                return ret;
+            }
+
+            /* Combine the S1 and S2 cache attributes. */
             if (env->cp15.hcr_el2 & HCR_DC) {
                 /*
                  * HCR.DC forces the first stage attributes to
@@ -11967,9 +11968,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                 cacheattrs->shareability = 0;
             }
             *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
-            }
-
-            return ret;
+            return 0;
         } else {
             /*
              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
@@ -12094,11 +12093,12 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     bool ret;
     ARMMMUFaultInfo fi = {};
     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    ARMCacheAttrs cacheattrs = {};
 
     *attrs = (MemTxAttrs) {};
 
     ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
-                        attrs, &prot, &page_size, &fi, NULL);
+                        attrs, &prot, &page_size, &fi, &cacheattrs);
 
     if (ret) {
         return -1;
target/arm/internals.h
@@ -1294,7 +1294,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
                    hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                    target_ulong *page_size,
-                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+    __attribute__((nonnull));
 
 void arm_log_exception(int idx);
 
target/arm/m_helper.c
@@ -187,12 +187,13 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
     hwaddr physaddr;
     int prot;
     ARMMMUFaultInfo fi = {};
+    ARMCacheAttrs cacheattrs = {};
     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
     int exc;
     bool exc_secure;
 
     if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
-                      &attrs, &prot, &page_size, &fi, NULL)) {
+                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             if (mode == STACK_LAZYFP) {
@@ -279,13 +280,14 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
     hwaddr physaddr;
     int prot;
     ARMMMUFaultInfo fi = {};
+    ARMCacheAttrs cacheattrs = {};
     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
     int exc;
     bool exc_secure;
     uint32_t value;
 
     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
-                      &attrs, &prot, &page_size, &fi, NULL)) {
+                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             qemu_log_mask(CPU_LOG_INT,
@@ -1928,6 +1930,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
     V8M_SAttributes sattrs = {};
     MemTxAttrs attrs = {};
     ARMMMUFaultInfo fi = {};
+    ARMCacheAttrs cacheattrs = {};
     MemTxResult txres;
     target_ulong page_size;
     hwaddr physaddr;
@@ -1945,8 +1948,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                       "...really SecureFault with SFSR.INVEP\n");
         return false;
     }
-    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
-                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
+    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
+                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
         /* the MPU lookup failed */
         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
target/arm/tlb_helper.c
@@ -166,6 +166,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     int prot, ret;
     MemTxAttrs attrs = {};
     ARMMMUFaultInfo fi = {};
+    ARMCacheAttrs cacheattrs = {};
 
     /*
      * Walk the page table and (if the mapping exists) add the page
@@ -175,7 +176,8 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
      */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                       &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+                       &phys_addr, &attrs, &prot, &page_size,
+                       &fi, &cacheattrs);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared