mirror of https://github.com/xemu-project/xemu.git
Merge tag 'pull-target-arm-20240813' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * hw/misc/stm32l4x5_rcc: Add validation for MCOPRE and MCOSEL values
 * target/arm: Clear high SVE elements in handle_vec_simd_wshli
 * target/arm: Fix usage of MMU indexes when EL3 is AArch32

# gpg: Signature made Wed 14 Aug 2024 01:17:54 AM AEST
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg: aka "Peter Maydell <peter@archaic.org.uk>" [unknown]

* tag 'pull-target-arm-20240813' of https://git.linaro.org/people/pmaydell/qemu-arm:
  target/arm: Fix usage of MMU indexes when EL3 is AArch32
  target/arm: Update translation regime comment for new features
  target/arm: Clear high SVE elements in handle_vec_simd_wshli
  hw/misc/stm32l4x5_rcc: Add validation for MCOPRE and MCOSEL values

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 3cc050c540
@@ -543,19 +543,31 @@ static void rcc_update_cfgr_register(Stm32l4x5RccState *s)
     uint32_t val;
     /* MCOPRE */
     val = FIELD_EX32(s->cfgr, CFGR, MCOPRE);
-    assert(val <= 0b100);
-    clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
-                         1, 1 << val);
+    if (val > 0b100) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Invalid MCOPRE value: 0x%"PRIx32"\n",
+                      __func__, val);
+        clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false);
+    } else {
+        clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
+                             1, 1 << val);
+    }

     /* MCOSEL */
     val = FIELD_EX32(s->cfgr, CFGR, MCOSEL);
-    assert(val <= 0b111);
-    if (val == 0) {
+    if (val > 0b111) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Invalid MCOSEL value: 0x%"PRIx32"\n",
+                      __func__, val);
         clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false);
     } else {
-        clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true);
-        clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
-                             val - 1);
+        if (val == 0) {
+            clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false);
+        } else {
+            clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true);
+            clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
+                                 val - 1);
+        }
     }

     /* STOPWUCK */
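For context, and not part of the diff above: with this change a guest that programs a reserved MCO prescaler or source no longer trips an assert() inside the device model; QEMU logs a guest error (visible with -d guest_errors) and simply gates the MCO clock. A minimal bare-metal sketch of such a write, assuming the usual STM32L4x5 RCC base of 0x40021000, the CFGR offset of 0x08 and MCOPRE in CFGR[30:28] (these details come from the STM32L4 reference manual, not from this patch):

#include <stdint.h>

#define RCC_BASE  0x40021000u                         /* assumed STM32L4x5 RCC base */
#define RCC_CFGR  (*(volatile uint32_t *)(RCC_BASE + 0x08))

void poke_reserved_mcopre(void)
{
    uint32_t cfgr = RCC_CFGR;

    cfgr &= ~(0x7u << 28);    /* clear MCOPRE (assumed CFGR[30:28]) */
    cfgr |=  (0x7u << 28);    /* 0b111 is reserved: greater than 0b100 */
    RCC_CFGR = cfgr;          /* with this patch: LOG_GUEST_ERROR and MCO gated off */
}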
@@ -2772,14 +2772,19 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  * + NonSecure EL1 & 0 stage 2
  * + NonSecure EL2
  * + NonSecure EL2 & 0 (ARMv8.1-VHE)
- * + Secure EL1 & 0
- * + Secure EL3
+ * + Secure EL1 & 0 stage 1
+ * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
+ * + Secure EL2 (FEAT_SEL2)
+ * + Secure EL2 & 0 (FEAT_SEL2)
+ * + Realm EL1 & 0 stage 1 (FEAT_RME)
+ * + Realm EL1 & 0 stage 2 (FEAT_RME)
+ * + Realm EL2 (FEAT_RME)
+ * + EL3
  * If EL3 is 32-bit:
  * + NonSecure PL1 & 0 stage 1
  * + NonSecure PL1 & 0 stage 2
  * + NonSecure PL2
- * + Secure PL0
- * + Secure PL1
+ * + Secure PL1 & 0
  * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
  *
  * For QEMU, an mmu_idx is not quite the same as a translation regime because:
@@ -2797,37 +2802,42 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  *    The only use of stage 2 translations is either as part of an s1+2
  *    lookup or when loading the descriptors during a stage 1 page table walk,
  *    and in both those cases we don't use the TLB.
- * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
- *    translation regimes, because they map reasonably well to each other
- *    and they can't both be active at the same time.
- * 5. we want to be able to use the TLB for accesses done as part of a
+ * 4. we want to be able to use the TLB for accesses done as part of a
  *    stage1 page table walk, rather than having to walk the stage2 page
  *    table over and over.
- * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
+ * 5. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
  *    Never (PAN) bit within PSTATE.
- * 7. we fold together the secure and non-secure regimes for A-profile,
+ * 6. we fold together most secure and non-secure regimes for A-profile,
  *    because there are no banked system registers for aarch64, so the
  *    process of switching between secure and non-secure is
  *    already heavyweight.
+ * 7. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
+ *    because both are in use simultaneously for Secure EL2.
  *
  * This gives us the following list of cases:
  *
- * EL0 EL1&0 stage 1+2 (aka NS PL0)
- * EL1 EL1&0 stage 1+2 (aka NS PL1)
- * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL1&0 stage 1+2 (or AArch32 PL0 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 (or AArch32 PL1 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 +PAN (or AArch32 PL1 PL1&0 stage 1+2 +PAN)
  * EL0 EL2&0
  * EL2 EL2&0
  * EL2 EL2&0 +PAN
  * EL2 (aka NS PL2)
- * EL3 (aka S PL1)
- * Physical (NS & S)
- * Stage2 (NS & S)
+ * EL3 (not used when EL3 is AArch32)
+ * Stage2 Secure
+ * Stage2 NonSecure
+ * plus one TLB per Physical address space: S, NS, Realm, Root
  *
- * for a total of 12 different mmu_idx.
+ * for a total of 14 different mmu_idx.
+ *
+ * Note that when EL3 is AArch32, the usage is potentially confusing
+ * because the MMU indexes are named for their AArch64 use, so code
+ * using the ARMMMUIdx_E10_1 might be at EL3, not EL1. This is because
+ * Secure PL1 is always at EL3.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish EL0 and EL1 (and
- * EL2 if we ever model a Cortex-R52).
+ * EL2 for cores like the Cortex-R52).
  *
  * M profile CPUs are rather different as they do not have a true MMU.
  * They have the following different MMU indexes:
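To make the "named for their AArch64 use" caveat concrete (an illustrative sketch, not code from the patch): with a 32-bit EL3, Secure PL1 code runs in the ARMMMUIdx_E10_1 regime, and the s_pl1_0 flag added later in this commit is what lets callers map that index back to the right exception level:

/* Illustrative only: the same MMU index resolves to different exception
 * levels depending on whether we are in the AArch32 Secure PL1&0 regime,
 * using the two-argument arm_mmu_idx_to_el() introduced by this commit.
 */
static void example_mmu_idx_mapping(void)
{
    int el_nonsecure = arm_mmu_idx_to_el(ARMMMUIdx_E10_1, false); /* -> 1 */
    int el_secure32  = arm_mmu_idx_to_el(ARMMMUIdx_E10_1, true);  /* -> 3: Secure PL1 is EL3 */

    (void)el_nonsecure;
    (void)el_secure32;
}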
@@ -3117,6 +3127,10 @@ FIELD(TBFLAG_A32, NS, 10, 1)
  * This requires an SME trap from AArch32 mode when using NEON.
  */
 FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)
+/*
+ * Indicates whether we are in the Secure PL1&0 translation regime
+ */
+FIELD(TBFLAG_A32, S_PL1_0, 12, 1)

 /*
  * Bit usage when in AArch32 state, for M-profile only.
@@ -3700,7 +3700,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
      */
     format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

-    if (arm_feature(env, ARM_FEATURE_EL2)) {
+    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_aa32_secure_pl1_0(env)) {
         if (mmu_idx == ARMMMUIdx_E10_0 ||
             mmu_idx == ARMMMUIdx_E10_1 ||
             mmu_idx == ARMMMUIdx_E10_1_PAN) {
@@ -3774,13 +3774,11 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     case 0:
         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
         switch (el) {
-        case 3:
-            mmu_idx = ARMMMUIdx_E3;
-            break;
         case 2:
             g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
             /* fall through */
         case 1:
+        case 3:
             if (ri->crm == 9 && arm_pan_enabled(env)) {
                 mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
             } else {
@@ -11861,8 +11859,11 @@ void arm_cpu_do_interrupt(CPUState *cs)

 uint64_t arm_sctlr(CPUARMState *env, int el)
 {
-    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
-    if (el == 0) {
+    if (arm_aa32_secure_pl1_0(env)) {
+        /* In Secure PL1&0 SCTLR_S is always controlling */
+        el = 3;
+    } else if (el == 0) {
+        /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
         el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
     }
@@ -12522,8 +12523,12 @@ int fp_exception_el(CPUARMState *env, int cur_el)
     return 0;
 }

-/* Return the exception level we're running at if this is our mmu_idx */
-int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
+/*
+ * Return the exception level we're running at if this is our mmu_idx.
+ * s_pl1_0 should be true if this is the AArch32 Secure PL1&0 translation
+ * regime.
+ */
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0)
 {
     if (mmu_idx & ARM_MMU_IDX_M) {
         return mmu_idx & ARM_MMU_IDX_M_PRIV;
@@ -12535,7 +12540,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
         return 0;
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
-        return 1;
+        return s_pl1_0 ? 3 : 1;
     case ARMMMUIdx_E2:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
@@ -12573,6 +12578,15 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
             idx = ARMMMUIdx_E10_0;
         }
         break;
+    case 3:
+        /*
+         * AArch64 EL3 has its own translation regime; AArch32 EL3
+         * uses the Secure PL1&0 translation regime.
+         */
+        if (arm_el_is_aa64(env, 3)) {
+            return ARMMMUIdx_E3;
+        }
+        /* fall through */
     case 1:
         if (arm_pan_enabled(env)) {
             idx = ARMMMUIdx_E10_1_PAN;
@@ -12592,8 +12606,6 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
             idx = ARMMMUIdx_E2;
         }
         break;
-    case 3:
-        return ARMMMUIdx_E3;
     default:
         g_assert_not_reached();
     }
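Taken together, the two arm_mmu_idx_el() hunks above mean EL3 only gets its dedicated MMU index when EL3 is AArch64. An illustrative summary, not code from the patch:

/* Illustrative only: expected arm_mmu_idx_el() results at EL3 after this
 * change, assuming PSTATE.PAN is clear in both cases.
 */
static void example_el3_mmu_idx(CPUARMState *env)
{
    ARMMMUIdx idx = arm_mmu_idx_el(env, 3);

    if (arm_el_is_aa64(env, 3)) {
        /* AArch64 EL3 keeps its dedicated translation regime */
        g_assert(idx == ARMMMUIdx_E3);
    } else {
        /* AArch32 EL3 reuses the (Secure) PL1&0 indexes */
        g_assert(idx == ARMMMUIdx_E10_1);
    }
}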
@@ -275,6 +275,20 @@ FIELD(CNTHCTL, CNTPMASK, 19, 1)
 #define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
 #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

+/**
+ * arm_aa32_secure_pl1_0(): Return true if in Secure PL1&0 regime
+ *
+ * Return true if the CPU is in the Secure PL1&0 translation regime.
+ * This requires that EL3 exists and is AArch32 and we are currently
+ * Secure. If this is the case then the ARMMMUIdx_E10* apply and
+ * mean we are in EL3, not EL1.
+ */
+static inline bool arm_aa32_secure_pl1_0(CPUARMState *env)
+{
+    return arm_feature(env, ARM_FEATURE_EL3) &&
+        !arm_el_is_aa64(env, 3) && arm_is_secure(env);
+}
+
 /**
  * raise_exception: Raise the specified exception.
  * Raise a guest exception with the specified value, syndrome register
@@ -808,7 +822,12 @@ static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
     return mmu_idx | ARM_MMU_IDX_A;
 }

-int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
+/**
+ * Return the exception level we're running at if our current MMU index
+ * is @mmu_idx. @s_pl1_0 should be true if this is the AArch32
+ * Secure PL1&0 translation regime.
+ */
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0);

 /* Return the MMU index for a v7M CPU in the specified security state */
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
@@ -903,11 +922,11 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
         return 3;
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_Stage1_E0:
-        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
-    case ARMMMUIdx_Stage1_E1:
-    case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
+    case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_Stage1_E1_PAN:
+        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
@@ -3576,7 +3576,11 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_E2:
-        ss = arm_security_space_below_el3(env);
+        if (arm_aa32_secure_pl1_0(env)) {
+            ss = ARMSS_Secure;
+        } else {
+            ss = arm_security_space_below_el3(env);
+        }
         break;
     case ARMMMUIdx_Stage2:
         /*
@@ -198,6 +198,10 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
         DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
     }

+    if (arm_aa32_secure_pl1_0(env)) {
+        DP_TBFLAG_A32(flags, S_PL1_0, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }

@@ -10756,6 +10756,7 @@ static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
         tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
         write_vec_element(s, tcg_rd, rd, i, size + 1);
     }
+    clear_vec_high(s, true, rd);
 }

 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
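Why the single added clear_vec_high() call matters (background, not part of the patch): on a CPU with SVE, the vector registers are wider than 128 bits, and the architecture requires any AdvSIMD write to Vd to zero the Zd bits above bit 127. A rough, standalone C sketch of the intended result for an 8B-to-8H SSHLL, with names chosen here purely for illustration:

#include <stdint.h>
#include <string.h>

/* Sketch of the architectural requirement behind clear_vec_high():
 * after an AdvSIMD widening shift writes Vd, every SVE byte of Zd
 * above the low 128 bits must read as zero.
 */
static void sshll_8b_to_8h(const int8_t src[8], unsigned shift,
                           uint8_t *zd, size_t zd_bytes /* SVE VL in bytes */)
{
    int16_t widened[8];

    for (int i = 0; i < 8; i++) {
        widened[i] = (int16_t)((int16_t)src[i] << shift); /* widen, then shift */
    }
    memset(zd, 0, zd_bytes);               /* high SVE portion becomes zero */
    memcpy(zd, widened, sizeof(widened));  /* low 128 bits hold the result */
}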
@@ -11978,7 +11979,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
     dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
     dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
-    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
+    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx, false);
 #if !defined(CONFIG_USER_ONLY)
     dc->user = (dc->current_el == 0);
 #endif
@@ -7546,10 +7546,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)

     core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
     dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
-    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
-#if !defined(CONFIG_USER_ONLY)
-    dc->user = (dc->current_el == 0);
-#endif
     dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
     dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
     dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
@@ -7580,7 +7576,12 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
         }
         dc->sme_trap_nonstreaming =
             EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
+        dc->s_pl1_0 = EX_TBFLAG_A32(tb_flags, S_PL1_0);
     }
+    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx, dc->s_pl1_0);
+#if !defined(CONFIG_USER_ONLY)
+    dc->user = (dc->current_el == 0);
+#endif
     dc->lse2 = false; /* applies only to aarch64 */
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
@@ -165,6 +165,8 @@ typedef struct DisasContext {
     uint8_t gm_blocksize;
     /* True if the current insn_start has been updated. */
     bool insn_start_updated;
+    /* True if this is the AArch32 Secure PL1&0 translation regime */
+    bool s_pl1_0;
     /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
     int c15_cpar;
     /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */