target/arm: Introduce arm_mmu_idx
The pattern

  ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

is computing the full ARMMMUIdx, stripping off the ARM bits, and then
putting them back. Avoid the extra two steps with the appropriate helper
function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190108223129.5570-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
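For readers unfamiliar with the split between QEMU's generic core mmu index and the target-specific ARMMMUIdx, the following is a minimal, self-contained sketch of the round trip the old pattern performed. The EX_*/ex_* names and the bit layout are hypothetical stand-ins, not QEMU's actual ARM_MMU_IDX_* definitions; they only illustrate that a full ARMMMUIdx is a core index plus ARM-specific tag bits.

/*
 * Illustrative sketch only: constants and helper names are hypothetical,
 * not QEMU's real encoding.
 */
#include <assert.h>

enum {
    EX_COREIDX_MASK = 0x7,   /* low bits: core mmu index (illustrative) */
    EX_PROFILE_TAG  = 0x10,  /* high bit: ARM-specific tag (illustrative) */
};

/* Strip the tag bits, keeping only the core index (cpu_mmu_index's role). */
static int ex_arm_to_core_mmu_idx(int full_idx)
{
    return full_idx & EX_COREIDX_MASK;
}

/* Re-attach the tag bits to a core index (core_to_arm_mmu_idx's role). */
static int ex_core_to_arm_mmu_idx(int core_idx)
{
    return core_idx | EX_PROFILE_TAG;
}

int main(void)
{
    int full = EX_PROFILE_TAG | 2;   /* what arm_mmu_idx(env) would compute */

    /* Old pattern: compute the full index, strip the tag, put it back. */
    int roundtrip = ex_core_to_arm_mmu_idx(ex_arm_to_core_mmu_idx(full));
    assert(roundtrip == full);       /* the two extra steps cancel out */
    (void)roundtrip;

    /* New pattern: callers that need the full index get it directly. */
    return 0;
}

With a helper that returns the full ARMMMUIdx directly, callers below no longer need to go through cpu_mmu_index() and undo its masking.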
This commit is contained in:
parent 65e4655c60
commit 50494a279d
@@ -2749,7 +2749,14 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
 /* Return the MMU index for a v7M CPU in the specified security state */
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
 
-/* Determine the current mmu_idx to use for normal loads/stores */
+/**
+ * cpu_mmu_index:
+ * @env: The cpu environment
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ */
 int cpu_mmu_index(CPUARMState *env, bool ifetch);
 
 /* Indexes used when registering address spaces with cpu_address_space_init */
@@ -7117,7 +7117,7 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
             limit = env->v7m.msplim[M_REG_S];
         }
     } else {
-        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+        mmu_idx = arm_mmu_idx(env);
         frame_sp_p = &env->regs[13];
         limit = v7m_sp_limit(env);
     }
@@ -7298,7 +7298,7 @@ static bool v7m_push_stack(ARMCPU *cpu)
     CPUARMState *env = &cpu->env;
     uint32_t xpsr = xpsr_read(env);
     uint32_t frameptr = env->regs[13];
-    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
 
     /* Align stack pointer if the guest wants that */
     if ((frameptr & 4) &&
@@ -11073,7 +11073,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     int prot;
     bool ret;
     ARMMMUFaultInfo fi = {};
-    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
 
     *attrs = (MemTxAttrs) {};
 
@@ -12977,26 +12977,31 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
     return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
 }
 
-int cpu_mmu_index(CPUARMState *env, bool ifetch)
+ARMMMUIdx arm_mmu_idx(CPUARMState *env)
 {
-    int el = arm_current_el(env);
+    int el;
 
     if (arm_feature(env, ARM_FEATURE_M)) {
-        ARMMMUIdx mmu_idx = arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
-
-        return arm_to_core_mmu_idx(mmu_idx);
+        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
     }
 
+    el = arm_current_el(env);
     if (el < 2 && arm_is_secure_below_el3(env)) {
-        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el);
+        return ARMMMUIdx_S1SE0 + el;
+    } else {
+        return ARMMMUIdx_S12NSE0 + el;
     }
-    return el;
+}
+
+int cpu_mmu_index(CPUARMState *env, bool ifetch)
+{
+    return arm_to_core_mmu_idx(arm_mmu_idx(env));
 }
 
 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                           target_ulong *cs_base, uint32_t *pflags)
 {
-    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
     int current_el = arm_current_el(env);
     int fp_el = fp_exception_el(env, current_el);
     uint32_t flags = 0;
@@ -919,4 +919,12 @@ void arm_cpu_update_virq(ARMCPU *cpu);
  */
 void arm_cpu_update_vfiq(ARMCPU *cpu);
 
+/**
+ * arm_mmu_idx:
+ * @env: The cpu environment
+ *
+ * Return the full ARMMMUIdx for the current translation regime.
+ */
+ARMMMUIdx arm_mmu_idx(CPUARMState *env);
+
 #endif