Merge tag 'pull-target-arm-20221020' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * Switch to TARGET_TB_PCREL
 * More pagetable-walk refactoring preparatory to HAFDBS
 * update the cortex-a15 MIDR to latest rev
 * hw/char/pl011: fix baud rate calculation
 * hw/ide/microdrive: Use device_cold_reset() for self-resets

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmNRPQ4ZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3l/BD/0ZAUmL9FtLHUtf9Y7S1sb/
# nuKcbaXim88AWRgSpiCJaSCIiyxNPydrRwfC1wZ7kQEXBKczR9x3ZsE8jdU6lway
# muqaLdzpBKVYZSh0Q03+3iufbm7s3hGxvCvNj0s5986ITmW86ndZM9CGm4bkyB6B
# KKN7v7lGPeYCF/sR+Ax/++eXK2BOALoPFF7bWycmJNcO4aYXE12onDNIDJU9CyCu
# y+nqPXTM5xAJ950iCOmdGKnMwOX1sBfZYUvCuG4tS63SYnjnKThyfLDttggapTB0
# T4I1SZ22+Bh7UuGEn9JAIeVZK4UUkYu78ufnX7FT2NDOFJJV1BJtKRe8UT/CsZha
# n2eAuVcddBwEStAhgUIvpbDm6WjtH3xiQWPphFX9bZl4AD1uI3sx/xe1b7PsPBTc
# e8kYRVW8zkRlCbC2BaJnPhcDiujf6AE2Sq3A8hgG1izS6juvHzzk9zn69TTbodIl
# ETtt+jn9E/OtlLC7bLJGgudGerqBELubwwVAHqYo207fvhLLiMMAhm0NbZmnJCqU
# hEgg6a9w4Ss3CYx5oNagWZ2Nfw5ru5KiwjZJ1wltjbBEQzp4y42a4XFWRvcXFFiz
# 3yGyvLmtHBywsIRpMc8IpXLC1SdD0fgT8/5VbWCHRziFjQLNCiuihsKkdiZOcJ22
# MoetN7iJxSDPCbHUp8ldsg==
# =NEl6
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 20 Oct 2022 08:20:30 EDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20221020' of https://git.linaro.org/people/pmaydell/qemu-arm: (24 commits)
  hw/ide/microdrive: Use device_cold_reset() for self-resets
  target/arm: Enable TARGET_TB_PCREL
  target/arm: Introduce gen_pc_plus_diff for aarch32
  target/arm: Introduce gen_pc_plus_diff for aarch64
  target/arm: Change gen_jmp* to work on displacements
  target/arm: Remove gen_exception_internal_insn pc argument
  target/arm: Change gen_exception_insn* to work on displacements
  target/arm: Change gen_*set_pc_im to gen_*update_pc
  target/arm: Change gen_goto_tb to work on displacements
  target/arm: Introduce curr_insn_len
  target/arm: Use bool consistently for get_phys_addr subroutines
  target/arm: Split out get_phys_addr_twostage
  target/arm: Use softmmu tlbs for page table walking
  target/arm: Move be test for regime into S1TranslateResult
  target/arm: Plumb debug into S1Translate
  target/arm: Split out S1Translate type
  target/arm: Restrict tlb flush from vttbr_write to vmid change
  target/arm: Move ARMMMUIdx_Stage2 to a real tlb mmu_idx
  target/arm: Add ARMMMUIdx_Phys_{S,NS}
  target/arm: Use probe_access_full for BTI
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 0529245488
@@ -176,7 +176,7 @@ static unsigned int pl011_get_baudrate(const PL011State *s)
 {
     uint64_t clk;
 
-    if (s->fbrd == 0) {
+    if (s->ibrd == 0) {
         return 0;
     }
 
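The fix above works because of how the PL011 computes its baud divisor: IBRD is the integer part and FBRD the 1/64 fractional part of the 16x divisor, so IBRD == 0 is the only setting that can divide by zero, while FBRD == 0 with a nonzero IBRD is a perfectly valid setting. A minimal standalone sketch of the calculation follows; the divisor formula is assumed from the surrounding pl011.c and is not shown in this hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical standalone model of pl011_get_baudrate(): the divisor
     * is 16 * (ibrd + fbrd/64), computed in integer arithmetic as
     * clk / ((ibrd << 6) + fbrd) followed by << 2.  Guarding on
     * ibrd == 0 avoids the divide-by-zero; guarding on fbrd == 0 (the
     * old code) rejected valid settings and missed the ibrd == 0 case.
     */
    static unsigned int baudrate(uint64_t clk, uint32_t ibrd, uint32_t fbrd)
    {
        if (ibrd == 0) {
            return 0;
        }
        return (clk / ((ibrd << 6) + fbrd)) << 2;
    }

    int main(void)
    {
        printf("%u\n", baudrate(24000000, 13, 1)); /* ~115200 baud from 24 MHz */
        return 0;
    }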
@@ -175,7 +175,7 @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value)
     case 0x00:	/* Configuration Option Register */
         s->opt = value & 0xcf;
         if (value & OPT_SRESET) {
-            device_legacy_reset(DEVICE(s));
+            device_cold_reset(DEVICE(s));
         }
         md_interrupt_update(s);
         break;
@@ -318,7 +318,7 @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value)
     case 0xe:	/* Device Control */
         s->ctrl = value;
         if (value & CTRL_SRST) {
-            device_legacy_reset(DEVICE(s));
+            device_cold_reset(DEVICE(s));
         }
         md_interrupt_update(s);
         break;
@@ -543,7 +543,7 @@ static int dscm1xxxx_attach(PCMCIACardState *card)
     md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8);
     md->io_base = 0x0;
 
-    device_legacy_reset(DEVICE(md));
+    device_cold_reset(DEVICE(md));
     md_interrupt_update(md);
 
     return 0;
@@ -553,7 +553,7 @@ static int dscm1xxxx_detach(PCMCIACardState *card)
 {
     MicroDriveState *md = MICRODRIVE(card);
 
-    device_legacy_reset(DEVICE(md));
+    device_cold_reset(DEVICE(md));
     return 0;
 }
 
@@ -30,8 +30,23 @@
  */
 # define TARGET_PAGE_BITS_VARY
 # define TARGET_PAGE_BITS_MIN  10
+
+# define TARGET_TB_PCREL 1
+
+/*
+ * Cache the attrs and shareability fields from the page table entry.
+ *
+ * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+ * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+ * For shareability and guarded, as in the SH and GP fields respectively
+ * of the VMSAv8-64 PTEs.
+ */
+# define TARGET_PAGE_ENTRY_EXTRA  \
+    uint8_t pte_attrs;            \
+    uint8_t shareability;         \
+    bool guarded;
 #endif
 
-#define NB_MMU_MODES 8
+#define NB_MMU_MODES 12
 
 #endif
@@ -76,17 +76,18 @@ static vaddr arm_cpu_get_pc(CPUState *cs)
 void arm_cpu_synchronize_from_tb(CPUState *cs,
                                  const TranslationBlock *tb)
 {
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-
-    /*
-     * It's OK to look at env for the current mode here, because it's
-     * never possible for an AArch64 TB to chain to an AArch32 TB.
-     */
-    if (is_a64(env)) {
-        env->pc = tb_pc(tb);
-    } else {
-        env->regs[15] = tb_pc(tb);
+    /* The program counter is always up to date with TARGET_TB_PCREL. */
+    if (!TARGET_TB_PCREL) {
+        CPUARMState *env = cs->env_ptr;
+        /*
+         * It's OK to look at env for the current mode here, because it's
+         * never possible for an AArch64 TB to chain to an AArch32 TB.
+         */
+        if (is_a64(env)) {
+            env->pc = tb_pc(tb);
+        } else {
+            env->regs[15] = tb_pc(tb);
+        }
     }
 }
 #endif /* CONFIG_TCG */
@@ -225,6 +225,8 @@ typedef struct CPUARMTBFlags {
     target_ulong flags2;
 } CPUARMTBFlags;
 
+typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
+
 typedef struct CPUArchState {
     /* Regs for current mode.  */
     uint32_t regs[16];
@@ -715,6 +717,9 @@ typedef struct CPUArchState {
     struct CPUBreakpoint *cpu_breakpoint[16];
     struct CPUWatchpoint *cpu_watchpoint[16];
 
+    /* Optional fault info across tlb lookup. */
+    ARMMMUFaultInfo *tlb_fi;
+
     /* Fields up to this point are cleared by a CPU reset */
     struct {} end_reset_fields;
 
@@ -2905,8 +2910,10 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  *  EL2 EL2&0 +PAN
  *  EL2 (aka NS PL2)
  *  EL3 (aka S PL1)
+ *  Physical (NS & S)
+ *  Stage2 (NS & S)
  *
- * for a total of 8 different mmu_idx.
+ * for a total of 12 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish EL0 and EL1 (and
@@ -2971,6 +2978,19 @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_E2        = 6 | ARM_MMU_IDX_A,
     ARMMMUIdx_E3        = 7 | ARM_MMU_IDX_A,
 
+    /* TLBs with 1-1 mapping to the physical address spaces. */
+    ARMMMUIdx_Phys_NS   = 8 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Phys_S    = 9 | ARM_MMU_IDX_A,
+
+    /*
+     * Used for second stage of an S12 page table walk, or for descriptor
+     * loads during first stage of an S1 page table walk.  Note that both
+     * are in use simultaneously for SecureEL2: the security state for
+     * the S2 ptw is selected by the NS bit from the S1 ptw.
+     */
+    ARMMMUIdx_Stage2    = 10 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Stage2_S  = 11 | ARM_MMU_IDX_A,
+
     /*
      * These are not allocated TLBs and are used only for AT system
      * instructions or for the first stage of an S12 page table walk.
@@ -2978,15 +2998,6 @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
-    /*
-     * Not allocated a TLB: used only for second stage of an S12 page
-     * table walk, or for descriptor loads during first stage of an S1
-     * page table walk. Note that if we ever want to have a TLB for this
-     * then various TLB flush insns which currently are no-ops or flush
-     * only stage 1 MMU indexes will need to change to flush stage 2.
-     */
-    ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage2_S = 4 | ARM_MMU_IDX_NOTLB,
 
     /*
      * M-profile.
@@ -3017,6 +3028,8 @@ typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(E20_2),
     TO_CORE_BIT(E20_2_PAN),
     TO_CORE_BIT(E3),
+    TO_CORE_BIT(Stage2),
+    TO_CORE_BIT(Stage2_S),
 
     TO_CORE_BIT(MUser),
     TO_CORE_BIT(MPriv),
@@ -3388,20 +3401,6 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c.  */
 extern const uint64_t pred_esz_masks[5];
 
-/* Helper for the macros below, validating the argument type. */
-static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
-{
-    return x;
-}
-
-/*
- * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
- * Using these should be a bit more self-documenting than using the
- * generic target bits directly.
- */
-#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-#define arm_tlb_mte_tagged(x) (typecheck_memtxattrs(x)->target_tlb_bit1)
-
 /*
  * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
  * Note that with the Linux kernel, PROT_MTE may not be cleared by mprotect
@@ -592,7 +592,9 @@ static void cortex_a15_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     set_feature(&cpu->env, ARM_FEATURE_PMU);
     cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
-    cpu->midr = 0x412fc0f1;
+    /* r4p0 cpu, not requiring expensive tlb flush errata */
+    cpu->midr = 0x414fc0f0;
+    cpu->revidr = 0x0;
     cpu->reset_fpsid = 0x410430f0;
     cpu->isar.mvfr0 = 0x10110222;
     cpu->isar.mvfr1 = 0x11111111;
@@ -399,6 +399,21 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     raw_write(env, ri, value);
 }
 
+static int alle1_tlbmask(CPUARMState *env)
+{
+    /*
+     * Note that the 'ALL' scope must invalidate both stage 1 and
+     * stage 2 translations, whereas most other scopes only invalidate
+     * stage 1 translations.
+     */
+    return (ARMMMUIdxBit_E10_1 |
+            ARMMMUIdxBit_E10_1_PAN |
+            ARMMMUIdxBit_E10_0 |
+            ARMMMUIdxBit_Stage2 |
+            ARMMMUIdxBit_Stage2_S);
+}
+
 /* IS variants of TLB operations must affect all cores */
 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
@@ -501,10 +516,7 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
 
-    tlb_flush_by_mmuidx(cs,
-                        ARMMMUIdxBit_E10_1 |
-                        ARMMMUIdxBit_E10_1_PAN |
-                        ARMMMUIdxBit_E10_0);
+    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
 }
 
 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -512,10 +524,7 @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
 
-    tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                        ARMMMUIdxBit_E10_1 |
-                                        ARMMMUIdxBit_E10_1_PAN |
-                                        ARMMMUIdxBit_E10_0);
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
 }
 
 
@@ -554,6 +563,24 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                              ARMMMUIdxBit_E2);
 }
 
+static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
+static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
+
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
+}
+
 static const ARMCPRegInfo cp_reginfo[] = {
     /* Define the secure and non-secure FCSE identifier CP registers
      * separately because there is no secure bank in V8 (no _EL3). This allows
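For reference, the pageaddr arithmetic used by the TLBIIPAS2 handlers above, and by the AArch64 TLBI IPAS2E1 handlers added later in this series, can be modelled standalone. This is an illustrative sketch only; MAKE_64BIT_MASK and sextract64 are QEMU helpers, re-expressed inline here:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* AArch32 TLBIIPAS2 carries IPA[39:12] in bits [27:0];
     * (value & MAKE_64BIT_MASK(0, 28)) << 12 rebuilds the page address.
     */
    static uint64_t aa32_ipas2_pageaddr(uint64_t value)
    {
        return (value & ((1ULL << 28) - 1)) << 12;
    }

    /* AArch64 TLBI IPAS2E1 carries IPA[55:12] in bits [43:0];
     * sextract64(value << 12, 0, 56) rebuilds and sign-extends it.
     */
    static int64_t aa64_ipas2_pageaddr(uint64_t value)
    {
        uint64_t shifted = value << 12;
        return (int64_t)(shifted << 8) >> 8;   /* sextract64(..., 0, 56) */
    }

    int main(void)
    {
        printf("0x%" PRIx64 "\n", aa32_ipas2_pageaddr(0x0123456));          /* 0x123456000 */
        printf("0x%" PRIx64 "\n", (uint64_t)aa64_ipas2_pageaddr(0x123456)); /* 0x123456000 */
        return 0;
    }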
@@ -3786,15 +3813,12 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     /*
      * A change in VMID to the stage2 page table (Stage2) invalidates
-     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
+     * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0).
      */
-    if (raw_read(env, ri) != value) {
-        uint16_t mask = ARMMMUIdxBit_E10_1 |
-                        ARMMMUIdxBit_E10_1_PAN |
-                        ARMMMUIdxBit_E10_0;
-        tlb_flush_by_mmuidx(cs, mask);
-        raw_write(env, ri, value);
+    if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
+        tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
     }
+    raw_write(env, ri, value);
 }
 
 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
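The new test keys the flush off the VMID field alone: VTTBR_EL2 holds the VMID in bits [63:48], so extract64(old ^ new, 48, 16) != 0 is exactly "did the VMID change", and writes that only move the translation-table base no longer flush anything. A runnable sketch of the check, with the field position assumed from the ARMv8 register layout:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Standalone illustration (not QEMU code): XOR old and new VTTBR
     * values and look only at the VMID field in bits [63:48].
     */
    static bool vmid_changed(uint64_t old_vttbr, uint64_t new_vttbr)
    {
        return (((old_vttbr ^ new_vttbr) >> 48) & 0xffff) != 0;
    }

    int main(void)
    {
        uint64_t base_only = 0x0000000012345000ULL;
        printf("%d\n", vmid_changed(base_only, base_only | (1ULL << 48))); /* 1 */
        printf("%d\n", vmid_changed(base_only, base_only | 0x1000));       /* 0 */
        return 0;
    }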
@@ -4313,18 +4337,6 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     }
 }
 
-static int alle1_tlbmask(CPUARMState *env)
-{
-    /*
-     * Note that the 'ALL' scope must invalidate both stage 1 and
-     * stage 2 translations, whereas most other scopes only invalidate
-     * stage 1 translations.
-     */
-    return (ARMMMUIdxBit_E10_1 |
-            ARMMMUIdxBit_E10_1_PAN |
-            ARMMMUIdxBit_E10_0);
-}
-
 static int e2_tlbmask(CPUARMState *env)
 {
     return (ARMMMUIdxBit_E20_0 |
@@ -4467,6 +4479,43 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                                   ARMMMUIdxBit_E3, bits);
 }
 
+static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
+{
+    /*
+     * The MSB of value is the NS field, which only applies if SEL2
+     * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
+     */
+    return (value >= 0
+            && cpu_isar_feature(aa64_sel2, env_archcpu(env))
+            && arm_is_secure_below_el3(env)
+            ? ARMMMUIdxBit_Stage2_S
+            : ARMMMUIdxBit_Stage2);
+}
+
+static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    int mask = ipas2e1_tlbmask(env, value);
+    uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+    if (tlb_force_broadcast(env)) {
+        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+    } else {
+        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
+    }
+}
+
+static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                      uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+    int mask = ipas2e1_tlbmask(env, value);
+    uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+}
+
 #ifdef TARGET_AARCH64
 typedef struct {
     uint64_t base;
@@ -4652,6 +4701,20 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
 
     do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
 }
 
+static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                     uint64_t value)
+{
+    do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
+                  tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
+                                       const ARMCPRegInfo *ri,
+                                       uint64_t value)
+{
+    do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
+}
+
 #endif
 
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4930,10 +4993,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .writefn = tlbi_aa64_vae1_write },
     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ipas2e1is_write },
     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ipas2e1is_write },
     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
       .access = PL2_W, .type = ARM_CP_NO_RAW,
@@ -4944,10 +5009,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .writefn = tlbi_aa64_alle1is_write },
     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ipas2e1_write },
     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ipas2e1_write },
     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
       .access = PL2_W, .type = ARM_CP_NO_RAW,
@@ -5028,16 +5095,20 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .writefn = tlbimva_hyp_is_write },
     { .name = "TLBIIPAS2",
       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
-      .type = ARM_CP_NOP, .access = PL2_W },
+      .type = ARM_CP_NO_RAW, .access = PL2_W,
+      .writefn = tlbiipas2_hyp_write },
     { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
-      .type = ARM_CP_NOP, .access = PL2_W },
+      .type = ARM_CP_NO_RAW, .access = PL2_W,
+      .writefn = tlbiipas2is_hyp_write },
     { .name = "TLBIIPAS2L",
       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
-      .type = ARM_CP_NOP, .access = PL2_W },
+      .type = ARM_CP_NO_RAW, .access = PL2_W,
+      .writefn = tlbiipas2_hyp_write },
     { .name = "TLBIIPAS2LIS",
       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
-      .type = ARM_CP_NOP, .access = PL2_W },
+      .type = ARM_CP_NO_RAW, .access = PL2_W,
+      .writefn = tlbiipas2is_hyp_write },
     /* 32 bit cache operations */
     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
@@ -6694,10 +6765,12 @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
       .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ripas2e1is_write },
     { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ripas2e1is_write },
     { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
@@ -6708,10 +6781,12 @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
       .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ripas2e1_write },
     { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
-      .access = PL2_W, .type = ARM_CP_NOP },
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_ripas2e1_write },
     { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
@@ -1095,6 +1095,7 @@ typedef struct ARMCacheAttrs {
     unsigned int attrs:8;
     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
     bool is_s2_format:1;
+    bool guarded:1;              /* guarded bit of the v8-64 PTE */
 } ARMCacheAttrs;
 
 /* Fields that are valid upon success. */
@@ -105,10 +105,9 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                               TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
     return tags + index;
 #else
-    uintptr_t index;
     CPUTLBEntryFull *full;
+    MemTxAttrs attrs;
     int in_page, flags;
-    ram_addr_t ptr_ra;
     hwaddr ptr_paddr, tag_paddr, xlat;
     MemoryRegion *mr;
     ARMASIdx tag_asi;
@@ -124,30 +123,12 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      * valid. Indicate to probe_access_flags no-fault, then assert that
      * we received a valid page.
      */
-    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
-                               ra == 0, &host, ra);
+    flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
+                              ra == 0, &host, &full, ra);
     assert(!(flags & TLB_INVALID_MASK));
 
-    /*
-     * Find the CPUTLBEntryFull for ptr.  This *must* be present in the TLB
-     * because we just found the mapping.
-     * TODO: Perhaps there should be a cputlb helper that returns a
-     * matching tlb entry + iotlb entry.
-     */
-    index = tlb_index(env, ptr_mmu_idx, ptr);
-# ifdef CONFIG_DEBUG_TCG
-    {
-        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
-        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
-                                   ? entry->addr_read
-                                   : tlb_addr_write(entry));
-        g_assert(tlb_hit(comparator, ptr));
-    }
-# endif
-    full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
-
     /* If the virtual page MemAttr != Tagged, access unchecked. */
-    if (!arm_tlb_mte_tagged(&full->attrs)) {
+    if (full->pte_attrs != 0xf0) {
         return NULL;
     }
 
@@ -162,6 +143,14 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
         return NULL;
     }
 
+    /*
+     * Remember these values across the second lookup below,
+     * which may invalidate this pointer via tlb resize.
+     */
+    ptr_paddr = full->phys_addr;
+    attrs = full->attrs;
+    full = NULL;
+
     /*
      * The Normal memory access can extend to the next page.  E.g. a single
      * 8-byte access to the last byte of a page will check only the last
@@ -170,9 +159,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      */
     in_page = -(ptr | TARGET_PAGE_MASK);
     if (unlikely(ptr_size > in_page)) {
-        void *ignore;
-        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
-                                    ptr_mmu_idx, ra == 0, &ignore, ra);
+        flags |= probe_access_full(env, ptr + in_page, ptr_access,
+                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
         assert(!(flags & TLB_INVALID_MASK));
     }
 
@@ -180,33 +168,17 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     if (unlikely(flags & TLB_WATCHPOINT)) {
         int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
         assert(ra != 0);
-        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
-                             full->attrs, wp, ra);
+        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
     }
 
-    /*
-     * Find the physical address within the normal mem space.
-     * The memory region lookup must succeed because TLB_MMIO was
-     * not set in the cputlb lookup above.
-     */
-    mr = memory_region_from_host(host, &ptr_ra);
-    tcg_debug_assert(mr != NULL);
-    tcg_debug_assert(memory_region_is_ram(mr));
-    ptr_paddr = ptr_ra;
-    do {
-        ptr_paddr += mr->addr;
-        mr = mr->container;
-    } while (mr);
-
     /* Convert to the physical address in tag space.  */
     tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
 
     /* Look up the address in tag space. */
-    tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
     tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
     mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
-                                 tag_access == MMU_DATA_STORE,
-                                 full->attrs);
+                                 tag_access == MMU_DATA_STORE, attrs);
 
     /*
      * Note that @mr will never be NULL.  If there is nothing in the address
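The tag-space conversion kept above, tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1), follows from MTE packing one 4-bit tag per 16-byte granule, i.e. two tags per byte of tag memory. A standalone sketch of that arithmetic, assuming LOG2_TAG_GRANULE == 4 as in QEMU's mte_helper.c:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only (not QEMU code): with a 16-byte tag granule and
     * one 4-bit tag per granule, two tags pack into each byte of tag
     * memory, so a data physical address maps into tag space by a right
     * shift of LOG2_TAG_GRANULE + 1.
     */
    enum { LOG2_TAG_GRANULE = 4 };

    static uint64_t tag_paddr(uint64_t data_paddr)
    {
        return data_paddr >> (LOG2_TAG_GRANULE + 1);
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)tag_paddr(0x40000020ULL)); /* 0x2000001 */
        return 0;
    }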
target/arm/ptw.c (535 changed lines)
@@ -9,17 +9,35 @@
 #include "qemu/osdep.h"
 #include "qemu/log.h"
 #include "qemu/range.h"
+#include "exec/exec-all.h"
 #include "cpu.h"
 #include "internals.h"
 #include "idau.h"
 
 
-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool is_secure, bool s1_is_el0,
+typedef struct S1Translate {
+    ARMMMUIdx in_mmu_idx;
+    bool in_secure;
+    bool in_debug;
+    bool out_secure;
+    bool out_be;
+    hwaddr out_phys;
+    void *out_host;
+} S1Translate;
+
+static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
+                               uint64_t address,
+                               MMUAccessType access_type, bool s1_is_el0,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
+static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+                                      target_ulong address,
+                                      MMUAccessType access_type,
+                                      GetPhysAddrResult *result,
+                                      ARMMMUFaultInfo *fi)
+    __attribute__((nonnull));
+
 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
 static const uint8_t pamax_map[] = {
     [0] = 32,
@@ -179,6 +197,11 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
     case ARMMMUIdx_E3:
         break;
 
+    case ARMMMUIdx_Phys_NS:
+    case ARMMMUIdx_Phys_S:
+        /* No translation for physical address spaces. */
+        return true;
+
     default:
         g_assert_not_reached();
     }
@@ -186,7 +209,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
 }
 
-static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
+static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 {
     /*
      * For an S1 page table walk, the stage 1 attributes are always
@@ -197,41 +220,77 @@ static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
      * when cacheattrs.attrs bit [2] is 0.
      */
-    assert(cacheattrs.is_s2_format);
     if (hcr & HCR_FWB) {
-        return (cacheattrs.attrs & 0x4) == 0;
+        return (attrs & 0x4) == 0;
     } else {
-        return (cacheattrs.attrs & 0xc) == 0;
+        return (attrs & 0xc) == 0;
     }
 }
 
 /* Translate a S1 pagetable walk through S2 if needed. */
-static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
-                               hwaddr addr, bool *is_secure_ptr,
-                               ARMMMUFaultInfo *fi)
+static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
+                             hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    bool is_secure = *is_secure_ptr;
+    bool is_secure = ptw->in_secure;
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+    bool s2_phys = false;
+    uint8_t pte_attrs;
+    bool pte_secure;
 
-    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
-        !regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
-        GetPhysAddrResult s2 = {};
-        uint64_t hcr;
-        int ret;
+    if (!arm_mmu_idx_is_stage1_of_2(mmu_idx)
+        || regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
+        s2_mmu_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+        s2_phys = true;
+    }
 
-        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
-                                 is_secure, false, &s2, fi);
-        if (ret) {
-            assert(fi->type != ARMFault_None);
-            fi->s2addr = addr;
-            fi->stage2 = true;
-            fi->s1ptw = true;
-            fi->s1ns = !is_secure;
-            return ~0;
-        }
-
-        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
-        if ((hcr & HCR_PTW) && ptw_attrs_are_device(hcr, s2.cacheattrs)) {
+    if (unlikely(ptw->in_debug)) {
+        /*
+         * From gdbstub, do not use softmmu so that we don't modify the
+         * state of the cpu at all, including softmmu tlb contents.
+         */
+        if (s2_phys) {
+            ptw->out_phys = addr;
+            pte_attrs = 0;
+            pte_secure = is_secure;
+        } else {
+            S1Translate s2ptw = {
+                .in_mmu_idx = s2_mmu_idx,
+                .in_secure = is_secure,
+                .in_debug = true,
+            };
+            GetPhysAddrResult s2 = { };
+            if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
+                                    false, &s2, fi)) {
+                goto fail;
+            }
+            ptw->out_phys = s2.f.phys_addr;
+            pte_attrs = s2.cacheattrs.attrs;
+            pte_secure = s2.f.attrs.secure;
+        }
+        ptw->out_host = NULL;
+    } else {
+        CPUTLBEntryFull *full;
+        int flags;
+
+        env->tlb_fi = fi;
+        flags = probe_access_full(env, addr, MMU_DATA_LOAD,
+                                  arm_to_core_mmu_idx(s2_mmu_idx),
+                                  true, &ptw->out_host, &full, 0);
+        env->tlb_fi = NULL;
+
+        if (unlikely(flags & TLB_INVALID_MASK)) {
+            goto fail;
+        }
+        ptw->out_phys = full->phys_addr;
+        pte_attrs = full->pte_attrs;
+        pte_secure = full->attrs.secure;
+    }
+
+    if (!s2_phys) {
+        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+
+        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
             /*
              * PTW set and S1 walk touched S2 Device memory:
              * generate Permission fault.
@@ -241,81 +300,104 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
             fi->stage2 = true;
             fi->s1ptw = true;
             fi->s1ns = !is_secure;
-            return ~0;
+            return false;
         }
+    }
 
-        if (arm_is_secure_below_el3(env)) {
-            /* Check if page table walk is to secure or non-secure PA space. */
-            if (is_secure) {
-                is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
-            } else {
-                is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
-            }
-            *is_secure_ptr = is_secure;
-        } else {
-            assert(!is_secure);
-        }
-
-        addr = s2.f.phys_addr;
-    }
-    return addr;
+    /* Check if page table walk is to secure or non-secure PA space. */
+    ptw->out_secure = (is_secure
+                       && !(pte_secure
+                            ? env->cp15.vstcr_el2 & VSTCR_SW
+                            : env->cp15.vtcr_el2 & VTCR_NSW));
+    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
+    return true;
+
+ fail:
+    assert(fi->type != ARMFault_None);
+    fi->s2addr = addr;
+    fi->stage2 = true;
+    fi->s1ptw = true;
+    fi->s1ns = !is_secure;
+    return false;
 }
 
 /* All loads done in the course of a page table walk go through here. */
-static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
-                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
+                            ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
-    MemTxAttrs attrs = {};
-    MemTxResult result = MEMTX_OK;
-    AddressSpace *as;
     uint32_t data;
 
-    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
-    attrs.secure = is_secure;
-    as = arm_addressspace(cs, attrs);
-    if (fi->s1ptw) {
+    if (!S1_ptw_translate(env, ptw, addr, fi)) {
+        /* Failure. */
+        assert(fi->s1ptw);
         return 0;
     }
-    if (regime_translation_big_endian(env, mmu_idx)) {
-        data = address_space_ldl_be(as, addr, attrs, &result);
+
+    if (likely(ptw->out_host)) {
+        /* Page tables are in RAM, and we have the host address. */
+        if (ptw->out_be) {
+            data = ldl_be_p(ptw->out_host);
+        } else {
+            data = ldl_le_p(ptw->out_host);
+        }
     } else {
-        data = address_space_ldl_le(as, addr, attrs, &result);
+        /* Page tables are in MMIO. */
+        MemTxAttrs attrs = { .secure = ptw->out_secure };
+        AddressSpace *as = arm_addressspace(cs, attrs);
+        MemTxResult result = MEMTX_OK;
+
+        if (ptw->out_be) {
+            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
+        } else {
+            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
+        }
+        if (unlikely(result != MEMTX_OK)) {
+            fi->type = ARMFault_SyncExternalOnWalk;
+            fi->ea = arm_extabort_type(result);
+            return 0;
+        }
     }
-    if (result == MEMTX_OK) {
-        return data;
-    }
-    fi->type = ARMFault_SyncExternalOnWalk;
-    fi->ea = arm_extabort_type(result);
-    return 0;
+    return data;
 }
 
-static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
-                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
+                            ARMMMUFaultInfo *fi)
 {
     CPUState *cs = env_cpu(env);
-    MemTxAttrs attrs = {};
-    MemTxResult result = MEMTX_OK;
-    AddressSpace *as;
     uint64_t data;
 
-    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
-    attrs.secure = is_secure;
-    as = arm_addressspace(cs, attrs);
-    if (fi->s1ptw) {
+    if (!S1_ptw_translate(env, ptw, addr, fi)) {
+        /* Failure. */
+        assert(fi->s1ptw);
        return 0;
     }
-    if (regime_translation_big_endian(env, mmu_idx)) {
-        data = address_space_ldq_be(as, addr, attrs, &result);
+
+    if (likely(ptw->out_host)) {
+        /* Page tables are in RAM, and we have the host address. */
+        if (ptw->out_be) {
+            data = ldq_be_p(ptw->out_host);
+        } else {
+            data = ldq_le_p(ptw->out_host);
+        }
     } else {
-        data = address_space_ldq_le(as, addr, attrs, &result);
+        /* Page tables are in MMIO. */
+        MemTxAttrs attrs = { .secure = ptw->out_secure };
+        AddressSpace *as = arm_addressspace(cs, attrs);
+        MemTxResult result = MEMTX_OK;
+
+        if (ptw->out_be) {
+            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
+        } else {
+            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
+        }
+        if (unlikely(result != MEMTX_OK)) {
+            fi->type = ARMFault_SyncExternalOnWalk;
+            fi->ea = arm_extabort_type(result);
+            return 0;
+        }
     }
-    if (result == MEMTX_OK) {
-        return data;
-    }
-    fi->type = ARMFault_SyncExternalOnWalk;
-    fi->ea = arm_extabort_type(result);
-    return 0;
+    return data;
 }
 
 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
@@ -426,10 +508,9 @@ static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
 }
 
-static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             bool is_secure, GetPhysAddrResult *result,
-                             ARMMMUFaultInfo *fi)
+static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
+                             uint32_t address, MMUAccessType access_type,
+                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     int level = 1;
     uint32_t table;
@@ -443,18 +524,18 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
 
     /* Pagetable walk.  */
     /* Lookup l1 descriptor.  */
-    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
         /* Section translation fault if page walk is disabled by PD0 or PD1 */
         fi->type = ARMFault_Translation;
         goto do_fault;
     }
-    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+    desc = arm_ldl_ptw(env, ptw, table, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
     type = (desc & 3);
     domain = (desc >> 5) & 0x0f;
-    if (regime_el(env, mmu_idx) == 1) {
+    if (regime_el(env, ptw->in_mmu_idx) == 1) {
         dacr = env->cp15.dacr_ns;
     } else {
         dacr = env->cp15.dacr_s;
@@ -486,7 +567,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
             /* Fine pagetable.  */
             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
         }
-        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+        desc = arm_ldl_ptw(env, ptw, table, fi);
         if (fi->type != ARMFault_None) {
             goto do_fault;
         }
@@ -530,7 +611,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
             g_assert_not_reached();
         }
     }
-    result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
     result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
     if (!(result->f.prot & (1 << access_type))) {
         /* Access permission fault.  */
@@ -545,12 +626,12 @@ do_fault:
     return true;
 }
 
-static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
-                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             bool is_secure, GetPhysAddrResult *result,
-                             ARMMMUFaultInfo *fi)
+static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
+                             uint32_t address, MMUAccessType access_type,
+                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     int level = 1;
     uint32_t table;
     uint32_t desc;
@@ -571,7 +652,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         fi->type = ARMFault_Translation;
         goto do_fault;
     }
-    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+    desc = arm_ldl_ptw(env, ptw, table, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
@@ -624,7 +705,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
             ns = extract32(desc, 3, 1);
             /* Lookup l2 entry.  */
             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
-            desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
+            desc = arm_ldl_ptw(env, ptw, table, fi);
             if (fi->type != ARMFault_None) {
                 goto do_fault;
             }
@@ -967,22 +1048,25 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
  * the WnR bit is never set (the caller must do this).
  *
  * @env: CPUARMState
+ * @ptw: Current and next stage parameters for the walk.
  * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
- * @mmu_idx: MMU index indicating required translation regime
- * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
- *             table walk), must be true if this is stage 2 of a stage 1+2
+ * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
+ *             (so this is a stage 2 page table walk),
+ *             must be true if this is stage 2 of a stage 1+2
  *             walk for an EL0 access. If @mmu_idx is anything else,
 *             @s1_is_el0 is ignored.
  * @result: set on translation success,
  * @fi: set to fault info if the translation fails
  */
-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool is_secure, bool s1_is_el0,
+static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
+                               uint64_t address,
+                               MMUAccessType access_type, bool s1_is_el0,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
+    bool is_secure = ptw->in_secure;
     /* Read an LPAE long-descriptor translation table. */
     ARMFaultType fault_type = ARMFault_Translation;
     uint32_t level;
@@ -1199,7 +1283,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
     descaddr |= (address >> (stride * (4 - level))) & indexmask;
     descaddr &= ~7ULL;
     nstable = extract32(tableattrs, 4, 1);
-    descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
+    ptw->in_secure = !nstable;
+    descriptor = arm_ldq_ptw(env, ptw, descaddr, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
@@ -1313,9 +1398,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
          */
         result->f.attrs.secure = false;
     }
-    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
-    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        arm_tlb_bti_gp(&result->f.attrs) = true;
+
+    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+        result->f.guarded = guarded;
     }
 
     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
@@ -2279,10 +2365,17 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
 {
     uint8_t memattr = 0x00;    /* Device nGnRnE */
     uint8_t shareability = 0;  /* non-sharable */
+    int r_el;
 
-    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
-        int r_el = regime_el(env, mmu_idx);
+    switch (mmu_idx) {
+    case ARMMMUIdx_Stage2:
+    case ARMMMUIdx_Stage2_S:
+    case ARMMMUIdx_Phys_NS:
+    case ARMMMUIdx_Phys_S:
+        break;
+
+    default:
+        r_el = regime_el(env, mmu_idx);
         if (arm_el_is_aa64(env, r_el)) {
             int pamax = arm_pamax(env_archcpu(env));
             uint64_t tcr = env->cp15.tcr_el[r_el];
@@ -2331,6 +2424,7 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
             shareability = 2; /* outer sharable */
         }
         result->cacheattrs.is_s2_format = false;
+        break;
     }
 
     result->f.phys_addr = address;
@@ -2338,111 +2432,116 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
     result->f.lg_page_size = TARGET_PAGE_BITS;
     result->cacheattrs.shareability = shareability;
     result->cacheattrs.attrs = memattr;
-    return 0;
+    return false;
 }
 
-bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool is_secure, GetPhysAddrResult *result,
-                               ARMMMUFaultInfo *fi)
+static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
+                                   target_ulong address,
+                                   MMUAccessType access_type,
+                                   GetPhysAddrResult *result,
+                                   ARMMMUFaultInfo *fi)
+{
+    hwaddr ipa;
+    int s1_prot;
+    bool is_secure = ptw->in_secure;
+    bool ret, ipa_secure, s2walk_secure;
+    ARMCacheAttrs cacheattrs1;
+    bool is_el0;
+    uint64_t hcr;
+
+    ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
+
+    /* If S1 fails or S2 is disabled, return early. */
+    if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
+        return ret;
+    }
+
+    ipa = result->f.phys_addr;
+    ipa_secure = result->f.attrs.secure;
+    if (is_secure) {
+        /* Select TCR based on the NS bit from the S1 walk. */
+        s2walk_secure = !(ipa_secure
+                          ? env->cp15.vstcr_el2 & VSTCR_SW
+                          : env->cp15.vtcr_el2 & VTCR_NSW);
+    } else {
+        assert(!ipa_secure);
+        s2walk_secure = false;
+    }
+
+    is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
+    ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+    ptw->in_secure = s2walk_secure;
+
+    /*
+     * S1 is done, now do S2 translation.
+     * Save the stage1 results so that we may merge prot and cacheattrs later.
+     */
+    s1_prot = result->f.prot;
+    cacheattrs1 = result->cacheattrs;
+    memset(result, 0, sizeof(*result));
+
+    ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
+    fi->s2addr = ipa;
+
+    /* Combine the S1 and S2 perms. */
+    result->f.prot &= s1_prot;
+
+    /* If S2 fails, return early. */
+    if (ret) {
+        return ret;
+    }
+
+    /* Combine the S1 and S2 cache attributes. */
+    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+    if (hcr & HCR_DC) {
+        /*
+         * HCR.DC forces the first stage attributes to
+         *  Normal Non-Shareable,
+         *  Inner Write-Back Read-Allocate Write-Allocate,
+         *  Outer Write-Back Read-Allocate Write-Allocate.
+         * Do not overwrite Tagged within attrs.
+         */
+        if (cacheattrs1.attrs != 0xf0) {
+            cacheattrs1.attrs = 0xff;
+        }
+        cacheattrs1.shareability = 0;
+    }
+    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
+                                            result->cacheattrs);
+
+    /*
+     * Check if IPA translates to secure or non-secure PA space.
+     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
+     */
+    result->f.attrs.secure =
+        (is_secure
+         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+         && (ipa_secure
+             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+
+    return false;
+}
+
+static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+                                      target_ulong address,
+                                      MMUAccessType access_type,
+                                      GetPhysAddrResult *result,
+                                      ARMMMUFaultInfo *fi)
 {
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
+    bool is_secure = ptw->in_secure;
 
     if (mmu_idx != s1_mmu_idx) {
         /*
          * Call ourselves recursively to do the stage 1 and then stage 2
-         * translations if mmu_idx is a two-stage regime.
+         * translations if mmu_idx is a two-stage regime, and EL2 present.
+         * Otherwise, a stage1+stage2 translation is just stage 1.
          */
+        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
         if (arm_feature(env, ARM_FEATURE_EL2)) {
-            hwaddr ipa;
-            int s1_prot;
-            int ret;
-            bool ipa_secure, s2walk_secure;
-            ARMCacheAttrs cacheattrs1;
-            ARMMMUIdx s2_mmu_idx;
-            bool is_el0;
-            uint64_t hcr;
-
-            ret = get_phys_addr_with_secure(env, address, access_type,
-                                            s1_mmu_idx, is_secure, result, fi);
-
-            /* If S1 fails or S2 is disabled, return early.  */
-            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
-                                                   is_secure)) {
-                return ret;
-            }
-
-            ipa = result->f.phys_addr;
-            ipa_secure = result->f.attrs.secure;
-            if (is_secure) {
-                /* Select TCR based on the NS bit from the S1 walk. */
-                s2walk_secure = !(ipa_secure
-                                  ? env->cp15.vstcr_el2 & VSTCR_SW
-                                  : env->cp15.vtcr_el2 & VTCR_NSW);
-            } else {
-                assert(!ipa_secure);
-                s2walk_secure = false;
-            }
-
-            s2_mmu_idx = (s2walk_secure
-                          ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
-            is_el0 = mmu_idx == ARMMMUIdx_E10_0;
-
-            /*
-             * S1 is done, now do S2 translation.
-             * Save the stage1 results so that we may merge
-             * prot and cacheattrs later.
-             */
-            s1_prot = result->f.prot;
-            cacheattrs1 = result->cacheattrs;
-            memset(result, 0, sizeof(*result));
-
-            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
-                                     s2walk_secure, is_el0, result, fi);
-            fi->s2addr = ipa;
-
-            /* Combine the S1 and S2 perms.  */
-            result->f.prot &= s1_prot;
-
-            /* If S2 fails, return early.  */
-            if (ret) {
-                return ret;
-            }
-
-            /* Combine the S1 and S2 cache attributes. */
-            hcr = arm_hcr_el2_eff_secstate(env, is_secure);
-            if (hcr & HCR_DC) {
-                /*
-                 * HCR.DC forces the first stage attributes to
-                 *  Normal Non-Shareable,
-                 *  Inner Write-Back Read-Allocate Write-Allocate,
-                 *  Outer Write-Back Read-Allocate Write-Allocate.
-                 * Do not overwrite Tagged within attrs.
-                 */
-                if (cacheattrs1.attrs != 0xf0) {
-                    cacheattrs1.attrs = 0xff;
-                }
-                cacheattrs1.shareability = 0;
-            }
-            result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
-                                                    result->cacheattrs);
-
-            /*
-             * Check if IPA translates to secure or non-secure PA space.
-             * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
-             */
-            result->f.attrs.secure =
-                (is_secure
-                 && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
-                 && (ipa_secure
-                     || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
-
-            return 0;
-        } else {
-            /*
-             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
-             */
-            mmu_idx = stage_1_mmu_idx(mmu_idx);
+            return get_phys_addr_twostage(env, ptw, address, access_type,
+                                          result, fi);
         }
     }
 
@@ -2503,18 +2602,30 @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
-                                  is_secure, false, result, fi);
+        return get_phys_addr_lpae(env, ptw, address, access_type, false,
+                                  result, fi);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
-        return get_phys_addr_v6(env, address, access_type, mmu_idx,
-                                is_secure, result, fi);
+        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
     } else {
-        return get_phys_addr_v5(env, address, access_type, mmu_idx,
-                                is_secure, result, fi);
+        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
     }
 }
 
+bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool is_secure, GetPhysAddrResult *result,
+                               ARMMMUFaultInfo *fi)
+{
+    S1Translate ptw = {
+        .in_mmu_idx = mmu_idx,
+        .in_secure = is_secure,
+    };
+    return get_phys_addr_with_struct(env, &ptw, address, access_type,
+                                     result, fi);
+}
+
 bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
@@ -2535,6 +2646,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         is_secure = arm_is_secure_below_el3(env);
         break;
     case ARMMMUIdx_Stage2:
+    case ARMMMUIdx_Phys_NS:
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
@@ -2543,6 +2655,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         break;
     case ARMMMUIdx_E3:
     case ARMMMUIdx_Stage2_S:
+    case ARMMMUIdx_Phys_S:
     case ARMMMUIdx_MSPrivNegPri:
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPriv:
@@ -2561,12 +2674,16 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
+    S1Translate ptw = {
+        .in_mmu_idx = arm_mmu_idx(env),
+        .in_secure = arm_is_secure(env),
+        .in_debug = true,
+    };
     GetPhysAddrResult res = {};
     ARMMMUFaultInfo fi = {};
-    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
     bool ret;
 
-    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
+    ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
     *attrs = res.f.attrs;
 
     if (ret) {
@@ -5351,8 +5351,19 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
      */
     addr = useronly_clean_ptr(addr);
 
+#ifdef CONFIG_USER_ONLY
     flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault,
                                &info->host, retaddr);
+    memset(&info->attrs, 0, sizeof(info->attrs));
+    /* Require both ANON and MTE; see allocation_tag_mem(). */
+    info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
+#else
+    CPUTLBEntryFull *full;
+    flags = probe_access_full(env, addr, access_type, mmu_idx, nofault,
+                              &info->host, &full, retaddr);
+    info->attrs = full->attrs;
+    info->tagged = full->pte_attrs == 0xf0;
+#endif
     info->flags = flags;
 
     if (flags & TLB_INVALID_MASK) {
@@ -5362,33 +5373,6 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
 
     /* Ensure that info->host[] is relative to addr, not addr + mem_off. */
     info->host -= mem_off;
-
-#ifdef CONFIG_USER_ONLY
-    memset(&info->attrs, 0, sizeof(info->attrs));
-    /* Require both MAP_ANON and PROT_MTE -- see allocation_tag_mem. */
-    arm_tlb_mte_tagged(&info->attrs) =
-        (flags & PAGE_ANON) && (flags & PAGE_MTE);
-#else
-    /*
-     * Find the iotlbentry for addr and return the transaction attributes.
-     * This *must* be present in the TLB because we just found the mapping.
-     */
-    {
-        uintptr_t index = tlb_index(env, mmu_idx, addr);
-
-# ifdef CONFIG_DEBUG_TCG
-        CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-        target_ulong comparator = (access_type == MMU_DATA_LOAD
-                                   ? entry->addr_read
-                                   : tlb_addr_write(entry));
-        g_assert(tlb_hit(comparator, addr));
-# endif
-
-        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-        info->attrs = full->attrs;
-    }
-#endif
 
     return true;
 }
@@ -5617,7 +5601,7 @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
     intptr_t mem_off, reg_off, reg_last;
 
     /* Process the page only if MemAttr == Tagged. */
-    if (arm_tlb_mte_tagged(&info->page[0].attrs)) {
+    if (info->page[0].tagged) {
         mem_off = info->mem_off_first[0];
         reg_off = info->reg_off_first[0];
         reg_last = info->reg_off_split;
@@ -5638,7 +5622,7 @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
     }
 
     mem_off = info->mem_off_first[1];
-    if (mem_off >= 0 && arm_tlb_mte_tagged(&info->page[1].attrs)) {
+    if (mem_off >= 0 && info->page[1].tagged) {
         reg_off = info->reg_off_first[1];
         reg_last = info->reg_off_last[1];
 
@@ -6017,7 +6001,7 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
      * Disable MTE checking if the Tagged bit is not set.  Since TBI must
      * be set within MTEDESC for MTE, !mtedesc => !mte_active.
      */
-    if (!arm_tlb_mte_tagged(&info.page[0].attrs)) {
+    if (!info.page[0].tagged) {
         mtedesc = 0;
     }
 
@@ -6568,7 +6552,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                     cpu_check_watchpoint(env_cpu(env), addr, msize,
                                          info.attrs, BP_MEM_READ, retaddr);
                 }
-                if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                if (mtedesc && info.tagged) {
                     mte_check(env, mtedesc, addr, retaddr);
                 }
                 if (unlikely(info.flags & TLB_MMIO)) {
@@ -6585,7 +6569,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                          msize, info.attrs,
                                          BP_MEM_READ, retaddr);
                 }
-                if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                if (mtedesc && info.tagged) {
                     mte_check(env, mtedesc, addr, retaddr);
                 }
                 tlb_fn(env, &scratch, reg_off, addr, retaddr);
@@ -6786,9 +6770,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                 (env_cpu(env), addr, msize) & BP_MEM_READ)) {
                 goto fault;
             }
-            if (mtedesc &&
-                arm_tlb_mte_tagged(&info.attrs) &&
-                !mte_probe(env, mtedesc, addr)) {
+            if (mtedesc && info.tagged && !mte_probe(env, mtedesc, addr)) {
                 goto fault;
             }
 
@@ -6974,7 +6956,7 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                          info.attrs, BP_MEM_WRITE, retaddr);
                 }
 
-                if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                if (mtedesc && info.tagged) {
                     mte_check(env, mtedesc, addr, retaddr);
                 }
             }
@@ -134,6 +134,7 @@ typedef struct {
     void *host;
     int flags;
     MemTxAttrs attrs;
+    bool tagged;
 } SVEHostPage;

 bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,

@@ -208,10 +208,21 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       bool probe, uintptr_t retaddr)
 {
     ARMCPU *cpu = ARM_CPU(cs);
-    ARMMMUFaultInfo fi = {};
     GetPhysAddrResult res = {};
+    ARMMMUFaultInfo local_fi, *fi;
     int ret;

+    /*
+     * Allow S1_ptw_translate to see any fault generated here.
+     * Since this may recurse, read and clear.
+     */
+    fi = cpu->env.tlb_fi;
+    if (fi) {
+        cpu->env.tlb_fi = NULL;
+    } else {
+        fi = memset(&local_fi, 0, sizeof(local_fi));
+    }
+
     /*
      * Walk the page table and (if the mapping exists) add the page
      * to the TLB. On success, return true. Otherwise, if probing,

@@ -220,7 +231,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
      */
     ret = get_phys_addr(&cpu->env, address, access_type,
                         core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                        &res, &fi);
+                        &res, fi);
     if (likely(!ret)) {
         /*
          * Map a single [sub]page. Regions smaller than our declared

@@ -231,10 +242,9 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
             res.f.phys_addr &= TARGET_PAGE_MASK;
             address &= TARGET_PAGE_MASK;
         }
-        /* Notice and record tagged memory. */
-        if (cpu_isar_feature(aa64_mte, cpu) && res.cacheattrs.attrs == 0xf0) {
-            arm_tlb_mte_tagged(&res.f.attrs) = true;
-        }

+        res.f.pte_attrs = res.cacheattrs.attrs;
+        res.f.shareability = res.cacheattrs.shareability;

         tlb_set_page_full(cs, mmu_idx, address, &res.f);
         return true;

@@ -243,7 +253,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     } else {
         /* now we have a real cpu fault */
         cpu_restore_state(cs, retaddr, true);
-        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
     }
 }
 #else

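One idiom in the hunk above is easy to misread: `fi = memset(&local_fi, 0, sizeof(local_fi));` leans on the fact that memset() returns its first argument, so the fallback struct is zeroed and its address captured in a single statement. A standalone illustration of just that C property:

#include <assert.h>
#include <string.h>

int main(void)
{
    struct { int x, y; } local, *p;

    /* memset() returns its first argument, so this zeroes 'local'
     * and points 'p' at it in one statement. */
    p = memset(&local, 0, sizeof(local));
    assert(p == &local && p->x == 0 && p->y == 0);
    return 0;
}
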
@@ -40,7 +40,7 @@ void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop);
 TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs);
 void gen_set_cpsr(TCGv_i32 var, uint32_t mask);
 void gen_set_condexec(DisasContext *s);
-void gen_set_pc_im(DisasContext *s, target_ulong val);
+void gen_update_pc(DisasContext *s, target_long diff);
 void gen_lookup_tb(DisasContext *s);
 long vfp_reg_offset(bool dp, unsigned reg);
 long neon_full_reg_offset(unsigned reg);

@@ -140,9 +140,20 @@ static void reset_btype(DisasContext *s)
     }
 }

-void gen_a64_set_pc_im(uint64_t val)
+static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
 {
-    tcg_gen_movi_i64(cpu_pc, val);
+    assert(s->pc_save != -1);
+    if (TARGET_TB_PCREL) {
+        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
+    } else {
+        tcg_gen_movi_i64(dest, s->pc_curr + diff);
+    }
+}
+
+void gen_a64_update_pc(DisasContext *s, target_long diff)
+{
+    gen_pc_plus_diff(s, cpu_pc, diff);
+    s->pc_save = s->pc_curr + diff;
 }

 /*

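Stepping back from the patch text for a moment: with TARGET_TB_PCREL the translator may not bake absolute guest addresses into a TB, so it keeps a software copy (pc_save) of whatever was last written to cpu_pc and emits each update as an addition of a known delta. A toy model of that invariant (hw_pc, Ctx and update_pc are made-up names, not QEMU's API):

#include <assert.h>
#include <stdint.h>

typedef struct {
    uint64_t pc_curr;  /* address of the insn being translated */
    uint64_t pc_save;  /* last value known to be held in hw_pc */
} Ctx;

static uint64_t hw_pc; /* stands in for the TCG global cpu_pc */

static void update_pc(Ctx *s, int64_t diff)
{
    /* Only the delta is materialized, never an absolute address. */
    hw_pc += (s->pc_curr - s->pc_save) + diff;
    s->pc_save = s->pc_curr + diff;
    assert(hw_pc == s->pc_save); /* the invariant the patch maintains */
}

int main(void)
{
    Ctx s = { .pc_curr = 0x1000, .pc_save = 0x1000 };
    hw_pc = 0x1000;
    update_pc(&s, 4);   /* advance to the next insn */
    s.pc_curr += 4;
    update_pc(&s, 0);   /* re-sync before a helper call */
    return 0;
}
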
@@ -196,6 +207,7 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
      * then loading an address into the PC will clear out any tag.
      */
     gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
+    s->pc_save = -1;
 }

 /*

@@ -332,16 +344,16 @@ static void gen_exception_internal(int excp)
     gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
 }

-static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
+static void gen_exception_internal_insn(DisasContext *s, int excp)
 {
-    gen_a64_set_pc_im(pc);
+    gen_a64_update_pc(s, 0);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }

 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
 {
-    gen_a64_set_pc_im(s->pc_curr);
+    gen_a64_update_pc(s, 0);
     gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
     s->base.is_jmp = DISAS_NORETURN;
 }

@@ -370,15 +382,28 @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
     return translator_use_goto_tb(&s->base, dest);
 }

-static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
+static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
 {
-    if (use_goto_tb(s, dest)) {
-        tcg_gen_goto_tb(n);
-        gen_a64_set_pc_im(dest);
+    if (use_goto_tb(s, s->pc_curr + diff)) {
+        /*
+         * For pcrel, the pc must always be up-to-date on entry to
+         * the linked TB, so that it can use simple additions for all
+         * further adjustments. For !pcrel, the linked TB is compiled
+         * to know its full virtual address, so we can delay the
+         * update to pc to the unlinked path. A long chain of links
+         * can thus avoid many updates to the PC.
+         */
+        if (TARGET_TB_PCREL) {
+            gen_a64_update_pc(s, diff);
+            tcg_gen_goto_tb(n);
+        } else {
+            tcg_gen_goto_tb(n);
+            gen_a64_update_pc(s, diff);
+        }
         tcg_gen_exit_tb(s->base.tb, n);
         s->base.is_jmp = DISAS_NORETURN;
     } else {
-        gen_a64_set_pc_im(dest);
+        gen_a64_update_pc(s, diff);
         if (s->ss_active) {
             gen_step_complete_exception(s);
         } else {

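The comment in that hunk states an ordering rule worth internalizing: with pcrel the PC write must precede the chained jump, while without it the write can sit after goto_tb, so fully linked chains skip it entirely. A toy model of that consequence (invented names; the real code emits TCG ops, not host calls):

#include <assert.h>
#include <stdbool.h>

static int pc_updates;

/* Toy: on the chained path, code placed after goto_tb never runs. */
static void run_tb_sketch(bool pcrel, bool chained)
{
    if (pcrel) {
        pc_updates++;   /* update pc, then goto_tb */
    } else if (!chained) {
        pc_updates++;   /* unlinked path falls through to the update */
    }                   /* chained && !pcrel: update skipped */
}

int main(void)
{
    run_tb_sketch(false, true);  /* chained, !pcrel: no PC update */
    assert(pc_updates == 0);
    run_tb_sketch(true, true);   /* pcrel always updates */
    assert(pc_updates == 1);
    return 0;
}
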
@@ -1153,7 +1178,7 @@ static bool fp_access_check_only(DisasContext *s)
         assert(!s->fp_access_checked);
         s->fp_access_checked = true;

-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn_el(s, 0, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false, 0),
                               s->fp_excp_el);
         return false;

@@ -1168,7 +1193,7 @@ static bool fp_access_check(DisasContext *s)
         return false;
     }
     if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_Streaming, false));
         return false;
     }

@@ -1188,7 +1213,7 @@ bool sve_access_check(DisasContext *s)
             goto fail_exit;
         }
     } else if (s->sve_excp_el) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn_el(s, 0, EXCP_UDEF,
                               syn_sve_access_trap(), s->sve_excp_el);
         goto fail_exit;
     }

@@ -1210,7 +1235,7 @@ bool sve_access_check(DisasContext *s)
 static bool sme_access_check(DisasContext *s)
 {
     if (s->sme_excp_el) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn_el(s, 0, EXCP_UDEF,
                               syn_smetrap(SME_ET_AccessTrap, false),
                               s->sme_excp_el);
         return false;

@@ -1240,12 +1265,12 @@ bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
         return false;
     }
     if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_NotStreaming, false));
         return false;
     }
     if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_InactiveZA, false));
         return false;
     }

@@ -1354,16 +1379,16 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
  */
 static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 {
-    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
+    int64_t diff = sextract32(insn, 0, 26) * 4;

     if (insn & (1U << 31)) {
         /* BL Branch with link */
-        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+        gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
     }

     /* B Branch / BL Branch with link */
     reset_btype(s);
-    gen_goto_tb(s, 0, addr);
+    gen_goto_tb(s, 0, diff);
 }

 /* Compare and branch (immediate)

@@ -1375,25 +1400,24 @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int sf, op, rt;
-    uint64_t addr;
-    TCGLabel *label_match;
+    int64_t diff;
+    DisasLabel match;
     TCGv_i64 tcg_cmp;

     sf = extract32(insn, 31, 1);
     op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
     rt = extract32(insn, 0, 5);
-    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
+    diff = sextract32(insn, 5, 19) * 4;

     tcg_cmp = read_cpu_reg(s, rt, sf);
-    label_match = gen_new_label();

     reset_btype(s);
-    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
-                        tcg_cmp, 0, label_match);

-    gen_goto_tb(s, 0, s->base.pc_next);
-    gen_set_label(label_match);
-    gen_goto_tb(s, 1, addr);
+    match = gen_disas_label(s);
+    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+                        tcg_cmp, 0, match.label);
+    gen_goto_tb(s, 0, 4);
+    set_disas_label(s, match);
+    gen_goto_tb(s, 1, diff);
 }

 /* Test and branch (immediate)

@@ -1405,26 +1429,27 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
 static void disas_test_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int bit_pos, op, rt;
-    uint64_t addr;
-    TCGLabel *label_match;
+    int64_t diff;
+    DisasLabel match;
     TCGv_i64 tcg_cmp;

     bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
     op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
-    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
+    diff = sextract32(insn, 5, 14) * 4;
     rt = extract32(insn, 0, 5);

     tcg_cmp = tcg_temp_new_i64();
     tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
-    label_match = gen_new_label();

     reset_btype(s);

+    match = gen_disas_label(s);
     tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
-                        tcg_cmp, 0, label_match);
+                        tcg_cmp, 0, match.label);
     tcg_temp_free_i64(tcg_cmp);
-    gen_goto_tb(s, 0, s->base.pc_next);
-    gen_set_label(label_match);
-    gen_goto_tb(s, 1, addr);
+    gen_goto_tb(s, 0, 4);
+    set_disas_label(s, match);
+    gen_goto_tb(s, 1, diff);
 }

 /* Conditional branch (immediate)

@@ -1436,26 +1461,26 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
 static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
 {
     unsigned int cond;
-    uint64_t addr;
+    int64_t diff;

     if ((insn & (1 << 4)) || (insn & (1 << 24))) {
         unallocated_encoding(s);
         return;
     }
-    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
+    diff = sextract32(insn, 5, 19) * 4;
     cond = extract32(insn, 0, 4);

     reset_btype(s);
     if (cond < 0x0e) {
         /* genuinely conditional branches */
-        TCGLabel *label_match = gen_new_label();
-        arm_gen_test_cc(cond, label_match);
-        gen_goto_tb(s, 0, s->base.pc_next);
-        gen_set_label(label_match);
-        gen_goto_tb(s, 1, addr);
+        DisasLabel match = gen_disas_label(s);
+        arm_gen_test_cc(cond, match.label);
+        gen_goto_tb(s, 0, 4);
+        set_disas_label(s, match);
+        gen_goto_tb(s, 1, diff);
     } else {
         /* 0xe and 0xf are both "always" conditions */
-        gen_goto_tb(s, 0, addr);
+        gen_goto_tb(s, 0, diff);
     }
 }

@@ -1629,7 +1654,7 @@ static void handle_sync(DisasContext *s, uint32_t insn,
          * any pending interrupts immediately.
          */
         reset_btype(s);
-        gen_goto_tb(s, 0, s->base.pc_next);
+        gen_goto_tb(s, 0, 4);
         return;

     case 7: /* SB */

@@ -1641,7 +1666,7 @@ static void handle_sync(DisasContext *s, uint32_t insn,
          * MB and end the TB instead.
          */
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-        gen_goto_tb(s, 0, s->base.pc_next);
+        gen_goto_tb(s, 0, 4);
         return;

     default:

@@ -1905,7 +1930,7 @@ static void gen_sysreg_undef(DisasContext *s, bool isread,
     } else {
         syndrome = syn_uncategorized();
     }
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome);
+    gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
 }

 /* MRS - move from system register

@@ -1950,7 +1975,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         uint32_t syndrome;

         syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
-        gen_a64_set_pc_im(s->pc_curr);
+        gen_a64_update_pc(s, 0);
         gen_helper_access_check_cp_reg(cpu_env,
                                        tcg_constant_ptr(ri),
                                        tcg_constant_i32(syndrome),

@@ -1960,7 +1985,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
          * The readfn or writefn might raise an exception;
          * synchronize the CPU state in case it does.
          */
-        gen_a64_set_pc_im(s->pc_curr);
+        gen_a64_update_pc(s, 0);
     }

     /* Handle special cases first */

@@ -2159,8 +2184,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
         switch (op2_ll) {
         case 1: /* SVC */
             gen_ss_advance(s);
-            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
-                               syn_aa64_svc(imm16));
+            gen_exception_insn(s, 4, EXCP_SWI, syn_aa64_svc(imm16));
             break;
         case 2: /* HVC */
             if (s->current_el == 0) {

@@ -2170,22 +2194,20 @@ static void disas_exc(DisasContext *s, uint32_t insn)
             /* The pre HVC helper handles cases when HVC gets trapped
              * as an undefined insn by runtime configuration.
              */
-            gen_a64_set_pc_im(s->pc_curr);
+            gen_a64_update_pc(s, 0);
             gen_helper_pre_hvc(cpu_env);
             gen_ss_advance(s);
-            gen_exception_insn_el(s, s->base.pc_next, EXCP_HVC,
-                                  syn_aa64_hvc(imm16), 2);
+            gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2);
             break;
         case 3: /* SMC */
             if (s->current_el == 0) {
                 unallocated_encoding(s);
                 break;
             }
-            gen_a64_set_pc_im(s->pc_curr);
+            gen_a64_update_pc(s, 0);
             gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
             gen_ss_advance(s);
-            gen_exception_insn_el(s, s->base.pc_next, EXCP_SMC,
-                                  syn_aa64_smc(imm16), 3);
+            gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3);
             break;
         default:
             unallocated_encoding(s);

@@ -2212,7 +2234,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
          * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
          */
         if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) {
-            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+            gen_exception_internal_insn(s, EXCP_SEMIHOST);
         } else {
             unallocated_encoding(s);
         }

@@ -2302,11 +2324,17 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         default:
             goto do_unallocated;
         }
-        gen_a64_set_pc(s, dst);
         /* BLR also needs to load return address */
         if (opc == 1) {
-            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+            TCGv_i64 lr = cpu_reg(s, 30);
+            if (dst == lr) {
+                TCGv_i64 tmp = new_tmp_a64(s);
+                tcg_gen_mov_i64(tmp, dst);
+                dst = tmp;
+            }
+            gen_pc_plus_diff(s, lr, curr_insn_len(s));
         }
+        gen_a64_set_pc(s, dst);
         break;

     case 8: /* BRAA */

@@ -2329,11 +2357,17 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         } else {
             dst = cpu_reg(s, rn);
         }
-        gen_a64_set_pc(s, dst);
         /* BLRAA also needs to load return address */
         if (opc == 9) {
-            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+            TCGv_i64 lr = cpu_reg(s, 30);
+            if (dst == lr) {
+                TCGv_i64 tmp = new_tmp_a64(s);
+                tcg_gen_mov_i64(tmp, dst);
+                dst = tmp;
+            }
+            gen_pc_plus_diff(s, lr, curr_insn_len(s));
         }
+        gen_a64_set_pc(s, dst);
         break;

     case 4: /* ERET */

@@ -2901,7 +2935,8 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)

     tcg_rt = cpu_reg(s, rt);

-    clean_addr = tcg_constant_i64(s->pc_curr + imm);
+    clean_addr = new_tmp_a64(s);
+    gen_pc_plus_diff(s, clean_addr, imm);
     if (is_vector) {
         do_fp_ld(s, rt, clean_addr, size);
     } else {

@@ -4245,23 +4280,22 @@ static void disas_ldst(DisasContext *s, uint32_t insn)
 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
 {
     unsigned int page, rd;
-    uint64_t base;
-    uint64_t offset;
+    int64_t offset;

     page = extract32(insn, 31, 1);
     /* SignExtend(immhi:immlo) -> offset */
     offset = sextract64(insn, 5, 19);
     offset = offset << 2 | extract32(insn, 29, 2);
     rd = extract32(insn, 0, 5);
-    base = s->pc_curr;

     if (page) {
         /* ADRP (page based) */
-        base &= ~0xfff;
         offset <<= 12;
+        /* The page offset is ok for TARGET_TB_PCREL. */
+        offset -= s->pc_curr & 0xfff;
     }

-    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
+    gen_pc_plus_diff(s, cpu_reg(s, rd), offset);
 }

 /*

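The ADRP rewrite above deserves a worked example. Architecturally ADRP computes (pc & ~0xfff) + (imm << 12); expressed as a displacement from the unaligned pc this is (imm << 12) - (pc & 0xfff), which is exactly what gets handed to gen_pc_plus_diff, and, as the comment notes, a page offset is safe even when TBs are PC-relative. A self-contained check with illustrative values:

#include <assert.h>
#include <stdint.h>

/* Sketch: ADRP's aligned-base form rewritten as a pc-relative offset. */
static uint64_t adrp_result(uint64_t pc, int64_t imm)
{
    int64_t offset = (imm << 12) - (int64_t)(pc & 0xfff);
    return pc + offset;
}

int main(void)
{
    uint64_t pc = 0x40001234;
    /* Same answer as (pc & ~0xfff) + (1 << 12). */
    assert(adrp_result(pc, 1) == 0x40002000);
    return 0;
}
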
@@ -14601,22 +14635,21 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
 #ifdef CONFIG_USER_ONLY
     return page_get_flags(addr) & PAGE_BTI;
 #else
+    CPUTLBEntryFull *full;
+    void *host;
     int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
-    unsigned int index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    int flags;

     /*
      * We test this immediately after reading an insn, which means
-     * that any normal page must be in the TLB. The only exception
-     * would be for executing from flash or device memory, which
-     * does not retain the TLB entry.
-     *
-     * FIXME: Assume false for those, for now. We could use
-     * arm_cpu_get_phys_page_attrs_debug to re-read the page
-     * table entry even for that case.
+     * that the TLB entry must be present and valid, and thus this
+     * access will never raise an exception.
      */
-    return (tlb_hit(entry->addr_code, addr) &&
-            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
+    flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx,
                              false, &host, &full, 0);
+    assert(!(flags & TLB_INVALID_MASK));
+
+    return full->guarded;
 #endif
 }

@@ -14683,7 +14716,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,

     dc->isar = &arm_cpu->isar;
     dc->condjmp = 0;
-
+    dc->pc_save = dc->base.pc_first;
     dc->aarch64 = true;
     dc->thumb = false;
     dc->sctlr_b = 0;

@@ -14765,8 +14798,12 @@ static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
+    target_ulong pc_arg = dc->base.pc_next;

-    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
+    if (TARGET_TB_PCREL) {
+        pc_arg &= ~TARGET_PAGE_MASK;
+    }
+    tcg_gen_insn_start(pc_arg, 0, 0);
     dc->insn_start = tcg_last_op();
 }

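The masking above is the key trick that keeps the insn-start metadata position-independent: when TARGET_TB_PCREL is set, only the offset of the instruction within its page is recorded, and the page base is recovered from the TB itself at unwind time. A self-contained illustration (4K pages assumed):

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_MASK ((uint64_t)-4096)  /* illustrative 4K pages */

int main(void)
{
    uint64_t pc = 0x0000aaaade001abcULL;
    uint64_t pc_arg = pc & ~TARGET_PAGE_MASK; /* in-page offset only */
    assert(pc_arg == 0xabc);
    return 0;
}
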
@@ -14823,7 +14860,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
          * Illegal execution state. This has priority over BTI
          * exceptions, but comes after instruction abort exceptions.
          */
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
+        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
         return;
     }

@@ -14854,8 +14891,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         if (s->btype != 0
             && s->guarded_page
             && !btype_destination_ok(insn, s->bt, s->btype)) {
-            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
-                               syn_btitrap(s->btype));
+            gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
             return;
         }
     } else {

@@ -14934,7 +14970,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          */
         switch (dc->base.is_jmp) {
         default:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             /* fall through */
         case DISAS_EXIT:
         case DISAS_JUMP:

@@ -14947,17 +14983,17 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         switch (dc->base.is_jmp) {
         case DISAS_NEXT:
         case DISAS_TOO_MANY:
-            gen_goto_tb(dc, 1, dc->base.pc_next);
+            gen_goto_tb(dc, 1, 4);
             break;
         default:
         case DISAS_UPDATE_EXIT:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             /* fall through */
         case DISAS_EXIT:
             tcg_gen_exit_tb(NULL, 0);
             break;
         case DISAS_UPDATE_NOCHAIN:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             /* fall through */
         case DISAS_JUMP:
             tcg_gen_lookup_and_goto_ptr();

@@ -14966,11 +15002,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         case DISAS_SWI:
             break;
         case DISAS_WFE:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             gen_helper_wfe(cpu_env);
             break;
         case DISAS_YIELD:
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             gen_helper_yield(cpu_env);
             break;
         case DISAS_WFI:

@@ -14978,7 +15014,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
              * This is a special case because we don't want to just halt
              * the CPU if trying to debug across a WFI.
              */
-            gen_a64_set_pc_im(dc->base.pc_next);
+            gen_a64_update_pc(dc, 4);
             gen_helper_wfi(cpu_env, tcg_constant_i32(4));
             /*
              * The helper doesn't necessarily throw an exception, but we

@@ -140,10 +140,10 @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
         tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
         tcg_gen_or_i32(sfpa, sfpa, aspen);
         arm_gen_condlabel(s);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);
+        tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel.label);

         if (s->fp_excp_el != 0) {
-            gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+            gen_exception_insn_el(s, 0, EXCP_NOCP,
                                   syn_uncategorized(), s->fp_excp_el);
             return true;
         }

@@ -765,12 +765,12 @@ static bool trans_NOCP(DisasContext *s, arg_nocp *a)
     }

     if (a->cp != 10) {
-        gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized());
+        gen_exception_insn(s, 0, EXCP_NOCP, syn_uncategorized());
         return true;
     }

     if (s->fp_excp_el != 0) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+        gen_exception_insn_el(s, 0, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
         return true;
     }

@@ -100,7 +100,7 @@ bool mve_eci_check(DisasContext *s)
         return true;
     default:
         /* Reserved value: INVSTATE UsageFault */
-        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized());
+        gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
         return false;
     }
 }

@@ -230,7 +230,7 @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
         int coproc = arm_dc_feature(s, ARM_FEATURE_V8) ? 0 : 0xa;
         uint32_t syn = syn_fp_access_trap(1, 0xe, false, coproc);

-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF, syn, s->fp_excp_el);
+        gen_exception_insn_el(s, 0, EXCP_UDEF, syn, s->fp_excp_el);
         return false;
     }

@@ -240,9 +240,9 @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
      * appear to be any insns which touch VFP which are allowed.
      */
     if (s->sme_trap_nonstreaming) {
-        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn(s, 0, EXCP_UDEF,
                            syn_smetrap(SME_ET_Streaming,
-                                       s->base.pc_next - s->pc_curr == 2));
+                                       curr_insn_len(s) == 2));
         return false;
     }

@@ -272,7 +272,7 @@ bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
      * the encoding space handled by the patterns in m-nocp.decode,
      * and for them we may need to raise NOCP here.
      */
-    gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+    gen_exception_insn_el(s, 0, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
     return false;
 }

@@ -856,7 +856,7 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
     case ARM_VFP_FPSID:
         if (s->current_el == 1) {
             gen_set_condexec(s);
-            gen_set_pc_im(s, s->pc_curr);
+            gen_update_pc(s, 0);
             gen_helper_check_hcr_el2_trap(cpu_env,
                                           tcg_constant_i32(a->rt),
                                           tcg_constant_i32(a->reg));

@@ -162,7 +162,7 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
 void arm_gen_condlabel(DisasContext *s)
 {
     if (!s->condjmp) {
-        s->condlabel = gen_new_label();
+        s->condlabel = gen_disas_label(s);
         s->condjmp = 1;
     }
 }

@@ -260,17 +260,27 @@ static inline int get_a32_user_mem_index(DisasContext *s)
     }
 }

-/* The architectural value of PC. */
-static uint32_t read_pc(DisasContext *s)
+/* The pc_curr difference for an architectural jump. */
+static target_long jmp_diff(DisasContext *s, target_long diff)
 {
-    return s->pc_curr + (s->thumb ? 4 : 8);
+    return diff + (s->thumb ? 4 : 8);
 }

+static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
+{
+    assert(s->pc_save != -1);
+    if (TARGET_TB_PCREL) {
+        tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff);
+    } else {
+        tcg_gen_movi_i32(var, s->pc_curr + diff);
+    }
+}
+
 /* Set a variable to the value of a CPU register. */
 void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
 {
     if (reg == 15) {
-        tcg_gen_movi_i32(var, read_pc(s));
+        gen_pc_plus_diff(s, var, jmp_diff(s, 0));
     } else {
         tcg_gen_mov_i32(var, cpu_R[reg]);
     }

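A quick sanity check on the jmp_diff() folding above: on AArch32 the architecturally visible PC reads as the instruction address plus 8 in ARM state (plus 4 in Thumb), so a branch immediate becomes a pc_curr-relative displacement just by adding that pipeline offset. A self-contained example (standalone re-implementation, not QEMU code):

#include <assert.h>
#include <stdint.h>

/* Re-implementation of the folding for illustration. */
static int64_t jmp_diff(int thumb, int64_t imm)
{
    return imm + (thumb ? 4 : 8);
}

int main(void)
{
    uint32_t pc_curr = 0x1000;
    /* ARM-state "B ." encodes imm = -8, so the target is pc_curr itself. */
    assert(pc_curr + jmp_diff(0, -8) == 0x1000);
    return 0;
}
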
@@ -286,7 +296,11 @@ TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
     TCGv_i32 tmp = tcg_temp_new_i32();

     if (reg == 15) {
-        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
+        /*
+         * This address is computed from an aligned PC:
+         * subtract off the low bits.
+         */
+        gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3)));
     } else {
         tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
     }

@@ -305,6 +319,7 @@ void store_reg(DisasContext *s, int reg, TCGv_i32 var)
          */
         tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
         s->base.is_jmp = DISAS_JUMP;
+        s->pc_save = -1;
     } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
         /* For M-profile SP bits [1:0] are always zero */
         tcg_gen_andi_i32(var, var, ~3);

@@ -768,9 +783,10 @@ void gen_set_condexec(DisasContext *s)
     }
 }

-void gen_set_pc_im(DisasContext *s, target_ulong val)
+void gen_update_pc(DisasContext *s, target_long diff)
 {
-    tcg_gen_movi_i32(cpu_R[15], val);
+    gen_pc_plus_diff(s, cpu_R[15], diff);
+    s->pc_save = s->pc_curr + diff;
 }

 /* Set PC and Thumb state from var. var is marked as dead. */

@@ -780,6 +796,7 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
     tcg_gen_andi_i32(cpu_R[15], var, ~1);
     tcg_gen_andi_i32(var, var, 1);
     store_cpu_field(var, thumb);
+    s->pc_save = -1;
 }

 /*

@@ -821,7 +838,7 @@ static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
 static inline void gen_bx_excret_final_code(DisasContext *s)
 {
     /* Generate the code to finish possible exception return and end the TB */
-    TCGLabel *excret_label = gen_new_label();
+    DisasLabel excret_label = gen_disas_label(s);
     uint32_t min_magic;

     if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {

@@ -833,14 +850,14 @@ static inline void gen_bx_excret_final_code(DisasContext *s)
     }

     /* Is the new PC value in the magic range indicating exception return? */
-    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
+    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label.label);
     /* No: end the TB as we would for a DISAS_JMP */
     if (s->ss_active) {
         gen_singlestep_exception(s);
     } else {
         tcg_gen_exit_tb(NULL, 0);
     }
-    gen_set_label(excret_label);
+    set_disas_label(s, excret_label);
     /* Yes: this is an exception return.
      * At this point in runtime env->regs[15] and env->thumb will hold
      * the exception-return magic number, which do_v7m_exception_exit()

@@ -862,7 +879,7 @@ static inline void gen_bxns(DisasContext *s, int rm)

     /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
      * we need to sync state before calling it, but:
-     *  - we don't need to do gen_set_pc_im() because the bxns helper will
+     *  - we don't need to do gen_update_pc() because the bxns helper will
      *    always set the PC itself
      *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
      *    unless it's outside an IT block or the last insn in an IT block,

@@ -883,7 +900,7 @@ static inline void gen_blxns(DisasContext *s, int rm)
      * We do however need to set the PC, because the blxns helper reads it.
      * The blxns helper may throw an exception.
      */
-    gen_set_pc_im(s, s->base.pc_next);
+    gen_update_pc(s, curr_insn_len(s));
     gen_helper_v7m_blxns(cpu_env, var);
     tcg_temp_free_i32(var);
     s->base.is_jmp = DISAS_EXIT;

@@ -1051,7 +1068,7 @@ static inline void gen_hvc(DisasContext *s, int imm16)
      * as an undefined insn by runtime configuration (ie before
      * the insn really executes).
      */
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     gen_helper_pre_hvc(cpu_env);
     /* Otherwise we will treat this as a real exception which
      * happens after execution of the insn. (The distinction matters

@@ -1059,7 +1076,7 @@ static inline void gen_hvc(DisasContext *s, int imm16)
      * for single stepping.)
      */
     s->svc_imm = imm16;
-    gen_set_pc_im(s, s->base.pc_next);
+    gen_update_pc(s, curr_insn_len(s));
     s->base.is_jmp = DISAS_HVC;
 }

@@ -1068,16 +1085,16 @@ static inline void gen_smc(DisasContext *s)
     /* As with HVC, we may take an exception either before or after
      * the insn executes.
      */
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa32_smc()));
-    gen_set_pc_im(s, s->base.pc_next);
+    gen_update_pc(s, curr_insn_len(s));
     s->base.is_jmp = DISAS_SMC;
 }

-static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
+static void gen_exception_internal_insn(DisasContext *s, int excp)
 {
     gen_set_condexec(s);
-    gen_set_pc_im(s, pc);
+    gen_update_pc(s, 0);
     gen_exception_internal(excp);
     s->base.is_jmp = DISAS_NORETURN;
 }

@@ -1099,32 +1116,34 @@ static void gen_exception(int excp, uint32_t syndrome)
                           tcg_constant_i32(syndrome));
 }

-static void gen_exception_insn_el_v(DisasContext *s, uint64_t pc, int excp,
-                                    uint32_t syn, TCGv_i32 tcg_el)
+static void gen_exception_insn_el_v(DisasContext *s, target_long pc_diff,
+                                    int excp, uint32_t syn, TCGv_i32 tcg_el)
 {
     if (s->aarch64) {
-        gen_a64_set_pc_im(pc);
+        gen_a64_update_pc(s, pc_diff);
     } else {
         gen_set_condexec(s);
-        gen_set_pc_im(s, pc);
+        gen_update_pc(s, pc_diff);
     }
     gen_exception_el_v(excp, syn, tcg_el);
     s->base.is_jmp = DISAS_NORETURN;
 }

-void gen_exception_insn_el(DisasContext *s, uint64_t pc, int excp,
+void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                            uint32_t syn, uint32_t target_el)
 {
-    gen_exception_insn_el_v(s, pc, excp, syn, tcg_constant_i32(target_el));
+    gen_exception_insn_el_v(s, pc_diff, excp, syn,
+                            tcg_constant_i32(target_el));
 }

-void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syn)
+void gen_exception_insn(DisasContext *s, target_long pc_diff,
+                        int excp, uint32_t syn)
 {
     if (s->aarch64) {
-        gen_a64_set_pc_im(pc);
+        gen_a64_update_pc(s, pc_diff);
     } else {
         gen_set_condexec(s);
-        gen_set_pc_im(s, pc);
+        gen_update_pc(s, pc_diff);
     }
     gen_exception(excp, syn);
     s->base.is_jmp = DISAS_NORETURN;

@@ -1133,7 +1152,7 @@ void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syn)
 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
 {
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syn));
     s->base.is_jmp = DISAS_NORETURN;
 }

@@ -1141,13 +1160,13 @@ static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
 void unallocated_encoding(DisasContext *s)
 {
     /* Unallocated and reserved encodings are uncategorized */
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized());
+    gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
 }

 /* Force a TB lookup after an instruction that changes the CPU state. */
 void gen_lookup_tb(DisasContext *s)
 {
-    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
+    gen_pc_plus_diff(s, cpu_R[15], curr_insn_len(s));
     s->base.is_jmp = DISAS_EXIT;
 }

@@ -1167,7 +1186,7 @@ static inline void gen_hlt(DisasContext *s, int imm)
      */
     if (semihosting_enabled(s->current_el != 0) &&
         (imm == (s->thumb ? 0x3c : 0xf000))) {
-        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+        gen_exception_internal_insn(s, EXCP_SEMIHOST);
         return;
     }

@@ -2590,25 +2609,38 @@ static void gen_goto_ptr(void)
  * cpu_loop_exec. Any live exit_requests will be processed as we
  * enter the next TB.
  */
-static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *s, int n, target_long diff)
 {
-    if (translator_use_goto_tb(&s->base, dest)) {
-        tcg_gen_goto_tb(n);
-        gen_set_pc_im(s, dest);
+    if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
+        /*
+         * For pcrel, the pc must always be up-to-date on entry to
+         * the linked TB, so that it can use simple additions for all
+         * further adjustments. For !pcrel, the linked TB is compiled
+         * to know its full virtual address, so we can delay the
+         * update to pc to the unlinked path. A long chain of links
+         * can thus avoid many updates to the PC.
+         */
+        if (TARGET_TB_PCREL) {
+            gen_update_pc(s, diff);
+            tcg_gen_goto_tb(n);
+        } else {
+            tcg_gen_goto_tb(n);
+            gen_update_pc(s, diff);
+        }
         tcg_gen_exit_tb(s->base.tb, n);
     } else {
-        gen_set_pc_im(s, dest);
+        gen_update_pc(s, diff);
         gen_goto_ptr();
     }
     s->base.is_jmp = DISAS_NORETURN;
 }

 /* Jump, specifying which TB number to use if we gen_goto_tb() */
-static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
+static void gen_jmp_tb(DisasContext *s, target_long diff, int tbno)
 {
     if (unlikely(s->ss_active)) {
         /* An indirect jump so that we still trigger the debug exception. */
-        gen_set_pc_im(s, dest);
+        gen_update_pc(s, diff);
         s->base.is_jmp = DISAS_JUMP;
         return;
     }

@@ -2625,7 +2657,7 @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
          *    gen_jmp();
          * on the second call to gen_jmp().
          */
-        gen_goto_tb(s, tbno, dest);
+        gen_goto_tb(s, tbno, diff);
         break;
     case DISAS_UPDATE_NOCHAIN:
     case DISAS_UPDATE_EXIT:

@@ -2634,7 +2666,7 @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
          * Avoid using goto_tb so we really do exit back to the main loop
          * and don't chain to another TB.
          */
-        gen_set_pc_im(s, dest);
+        gen_update_pc(s, diff);
         gen_goto_ptr();
         s->base.is_jmp = DISAS_NORETURN;
         break;

@@ -2647,9 +2679,9 @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
     }
 }

-static inline void gen_jmp(DisasContext *s, uint32_t dest)
+static inline void gen_jmp(DisasContext *s, target_long diff)
 {
-    gen_jmp_tb(s, dest, 0);
+    gen_jmp_tb(s, diff, 0);
 }

 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)

@@ -2861,7 +2893,7 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
             tcg_el = tcg_constant_i32(3);
         }

-        gen_exception_insn_el_v(s, s->pc_curr, EXCP_UDEF,
+        gen_exception_insn_el_v(s, 0, EXCP_UDEF,
                                 syn_uncategorized(), tcg_el);
         tcg_temp_free_i32(tcg_el);
         return false;

@@ -2887,7 +2919,7 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,

 undef:
     /* If we get here then some access check did not pass */
-    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized());
+    gen_exception_insn(s, 0, EXCP_UDEF, syn_uncategorized());
     return false;
 }

@@ -2902,7 +2934,7 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)

     /* Sync state because msr_banked() can raise exceptions */
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     tcg_reg = load_reg(s, rn);
     gen_helper_msr_banked(cpu_env, tcg_reg,
                           tcg_constant_i32(tgtmode),

@@ -2922,7 +2954,7 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)

     /* Sync state because mrs_banked() can raise exceptions */
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     tcg_reg = tcg_temp_new_i32();
     gen_helper_mrs_banked(tcg_reg, cpu_env,
                           tcg_constant_i32(tgtmode),

@@ -4743,7 +4775,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
     }

     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     gen_helper_access_check_cp_reg(cpu_env,
                                    tcg_constant_ptr(ri),
                                    tcg_constant_i32(syndrome),

@@ -4754,7 +4786,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
      * synchronize the CPU state in case it does.
      */
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     }

     /* Handle special cases first */

@@ -4768,7 +4800,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
             unallocated_encoding(s);
             return;
         }
-        gen_set_pc_im(s, s->base.pc_next);
+        gen_update_pc(s, curr_insn_len(s));
         s->base.is_jmp = DISAS_WFI;
         return;
     default:

@@ -5111,8 +5143,7 @@ static void gen_srs(DisasContext *s,
      * For the UNPREDICTABLE cases we choose to UNDEF.
      */
     if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
-        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
-                              syn_uncategorized(), 3);
+        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_uncategorized(), 3);
         return;
     }

@@ -5155,7 +5186,7 @@ static void gen_srs(DisasContext *s,
     addr = tcg_temp_new_i32();
     /* get_r13_banked() will raise an exception if called from System mode */
     gen_set_condexec(s);
-    gen_set_pc_im(s, s->pc_curr);
+    gen_update_pc(s, 0);
     gen_helper_get_r13_banked(addr, cpu_env, tcg_constant_i32(mode));
     switch (amode) {
     case 0: /* DA */

@@ -5209,7 +5240,7 @@ static void gen_srs(DisasContext *s,
 static void arm_skip_unless(DisasContext *s, uint32_t cond)
 {
     arm_gen_condlabel(s);
-    arm_gen_test_cc(cond ^ 1, s->condlabel);
+    arm_gen_test_cc(cond ^ 1, s->condlabel.label);
 }

@@ -6224,7 +6255,7 @@ static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
      * scheduling of other vCPUs.
      */
     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
-        gen_set_pc_im(s, s->base.pc_next);
+        gen_update_pc(s, curr_insn_len(s));
         s->base.is_jmp = DISAS_YIELD;
     }
     return true;

@@ -6240,7 +6271,7 @@ static bool trans_WFE(DisasContext *s, arg_WFE *a)
      * implemented so we can't sleep like WFI does.
      */
     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
-        gen_set_pc_im(s, s->base.pc_next);
+        gen_update_pc(s, curr_insn_len(s));
         s->base.is_jmp = DISAS_WFE;
     }
     return true;

@@ -6249,7 +6280,7 @@ static bool trans_WFE(DisasContext *s, arg_WFE *a)
 static bool trans_WFI(DisasContext *s, arg_WFI *a)
 {
     /* For WFI, halt the vCPU until an IRQ. */
-    gen_set_pc_im(s, s->base.pc_next);
+    gen_update_pc(s, curr_insn_len(s));
     s->base.is_jmp = DISAS_WFI;
     return true;
 }

@@ -6470,7 +6501,7 @@ static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
         return false;
     }
     tmp = load_reg(s, a->rm);
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
     gen_bx(s, tmp);
     return true;
 }

@@ -6551,7 +6582,7 @@ static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
     if (arm_dc_feature(s, ARM_FEATURE_M) &&
         semihosting_enabled(s->current_el == 0) &&
         (a->imm == 0xab)) {
-        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+        gen_exception_internal_insn(s, EXCP_SEMIHOST);
     } else {
         gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
     }

@@ -6650,7 +6681,7 @@ static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
     /* ISS not valid if writeback */
     if (p && !w) {
         ret = rd;
-        if (s->base.pc_next - s->pc_curr == 2) {
+        if (curr_insn_len(s) == 2) {
             ret |= ISSIs16Bit;
         }
     } else {

@@ -8317,7 +8348,7 @@ static bool trans_CLRM(DisasContext *s, arg_CLRM *a)

 static bool trans_B(DisasContext *s, arg_i *a)
 {
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }

@@ -8332,14 +8363,14 @@ static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
         return true;
     }
     arm_skip_unless(s, a->cond);
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }

 static bool trans_BL(DisasContext *s, arg_i *a)
 {
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
-    gen_jmp(s, read_pc(s) + a->imm);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }

@@ -8357,16 +8388,17 @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
     if (s->thumb && (a->imm & 2)) {
         return false;
     }
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb);
     store_cpu_field_constant(!s->thumb, thumb);
-    gen_jmp(s, (read_pc(s) & ~3) + a->imm);
+    /* This jump is computed from an aligned PC: subtract off the low bits. */
+    gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3)));
     return true;
 }

 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
 {
     assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
-    tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
+    gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12));
     return true;
 }

@@ -8376,7 +8408,7 @@ static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)

     assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
     tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
     gen_bx(s, tmp);
     return true;
 }

@@ -8392,7 +8424,7 @@ static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
     tmp = tcg_temp_new_i32();
     tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
     tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
-    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
+    gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | 1);
     gen_bx(s, tmp);
     return true;
 }

@@ -8459,7 +8491,7 @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
 {
     /* M-profile low-overhead while-loop start */
     TCGv_i32 tmp;
-    TCGLabel *nextlabel;
+    DisasLabel nextlabel;

     if (!dc_isar_feature(aa32_lob, s)) {
         return false;

@@ -8494,14 +8526,14 @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
          * Do the check-and-raise-exception by hand.
          */
         if (s->fp_excp_el) {
-            gen_exception_insn_el(s, s->pc_curr, EXCP_NOCP,
+            gen_exception_insn_el(s, 0, EXCP_NOCP,
                                   syn_uncategorized(), s->fp_excp_el);
             return true;
         }
     }

-    nextlabel = gen_new_label();
-    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel);
+    nextlabel = gen_disas_label(s);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label);
     tmp = load_reg(s, a->rn);
     store_reg(s, 14, tmp);
     if (a->size != 4) {

@@ -8520,10 +8552,10 @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
          * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
          */
     }
-    gen_jmp_tb(s, s->base.pc_next, 1);
+    gen_jmp_tb(s, curr_insn_len(s), 1);

-    gen_set_label(nextlabel);
-    gen_jmp(s, read_pc(s) + a->imm);
+    set_disas_label(s, nextlabel);
+    gen_jmp(s, jmp_diff(s, a->imm));
     return true;
 }

@@ -8538,7 +8570,7 @@ static bool trans_LE(DisasContext *s, arg_LE *a)
      * any faster.
      */
     TCGv_i32 tmp;
-    TCGLabel *loopend;
+    DisasLabel loopend;
     bool fpu_active;

     if (!dc_isar_feature(aa32_lob, s)) {

@@ -8593,17 +8625,17 @@ static bool trans_LE(DisasContext *s, arg_LE *a)

     if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
         /* Need to do a runtime check for LTPSIZE != 4 */
-        TCGLabel *skipexc = gen_new_label();
+        DisasLabel skipexc = gen_disas_label(s);
         tmp = load_cpu_field(v7m.ltpsize);
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc);
+        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
         tcg_temp_free_i32(tmp);
-        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized());
-        gen_set_label(skipexc);
+        gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
+        set_disas_label(s, skipexc);
     }

     if (a->f) {
         /* Loop-forever: just jump back to the loop start */
-        gen_jmp(s, read_pc(s) - a->imm);
+        gen_jmp(s, jmp_diff(s, -a->imm));
         return true;
     }

@@ -8613,9 +8645,9 @@ static bool trans_LE(DisasContext *s, arg_LE *a)
      * loop decrement value is 1. For LETP we need to calculate the decrement
      * value from LTPSIZE.
      */
-    loopend = gen_new_label();
+    loopend = gen_disas_label(s);
     if (!a->tp) {
-        tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend);
+        tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend.label);
         tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1);
     } else {
         /*

@@ -8628,21 +8660,21 @@ static bool trans_LE(DisasContext *s, arg_LE *a)
         tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
         tcg_temp_free_i32(ltpsize);

-        tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend);
+        tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label);

         tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr);
         tcg_temp_free_i32(decr);
     }
     /* Jump back to the loop start */
-    gen_jmp(s, read_pc(s) - a->imm);
+    gen_jmp(s, jmp_diff(s, -a->imm));

-    gen_set_label(loopend);
+    set_disas_label(s, loopend);
     if (a->tp) {
         /* Exits from tail-pred loops must reset LTPSIZE to 4 */
         store_cpu_field(tcg_constant_i32(4), v7m.ltpsize);
     }
     /* End TB, continuing to following insn */
-    gen_jmp_tb(s, s->base.pc_next, 1);
+    gen_jmp_tb(s, curr_insn_len(s), 1);
     return true;
 }

@ -8715,10 +8747,11 @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
|
|||
tcg_gen_add_i32(addr, addr, tmp);
|
||||
|
||||
gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
|
||||
tcg_temp_free_i32(addr);
|
||||
|
||||
tcg_gen_add_i32(tmp, tmp, tmp);
|
||||
tcg_gen_addi_i32(tmp, tmp, read_pc(s));
|
||||
gen_pc_plus_diff(s, addr, jmp_diff(s, 0));
|
||||
tcg_gen_add_i32(tmp, tmp, addr);
|
||||
tcg_temp_free_i32(addr);
|
||||
store_reg(s, 15, tmp);
|
||||
return true;
|
||||
}
|
||||
|
@ -8739,9 +8772,9 @@ static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
|
|||
|
||||
arm_gen_condlabel(s);
|
||||
tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
|
||||
tmp, 0, s->condlabel);
|
||||
tmp, 0, s->condlabel.label);
|
||||
tcg_temp_free_i32(tmp);
|
||||
gen_jmp(s, read_pc(s) + a->imm);
|
||||
gen_jmp(s, jmp_diff(s, a->imm));
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -8757,9 +8790,9 @@ static bool trans_SVC(DisasContext *s, arg_SVC *a)
|
|||
if (!arm_dc_feature(s, ARM_FEATURE_M) &&
|
||||
semihosting_enabled(s->current_el == 0) &&
|
||||
(a->imm == semihost_imm)) {
|
||||
gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
|
||||
gen_exception_internal_insn(s, EXCP_SEMIHOST);
|
||||
} else {
|
||||
gen_set_pc_im(s, s->base.pc_next);
|
||||
gen_update_pc(s, curr_insn_len(s));
|
||||
s->svc_imm = a->imm;
|
||||
s->base.is_jmp = DISAS_SWI;
|
||||
}
|
||||
|
@ -9065,7 +9098,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
|
|||
* UsageFault exception.
|
||||
*/
|
||||
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
||||
gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized());
|
||||
gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -9074,7 +9107,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
|
|||
* Illegal execution state. This has priority over BTI
|
||||
* exceptions, but comes after instruction abort exceptions.
|
||||
*/
|
||||
gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
|
||||
gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -9305,7 +9338,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|
|||
|
||||
dc->isar = &cpu->isar;
|
||||
dc->condjmp = 0;
|
||||
|
||||
dc->pc_save = dc->base.pc_first;
|
||||
dc->aarch64 = false;
|
||||
dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
|
||||
dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
|
||||
|
@ -9323,7 +9356,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|
|||
*/
|
||||
dc->eci = dc->condexec_mask = dc->condexec_cond = 0;
|
||||
dc->eci_handled = false;
|
||||
dc->insn_eci_rewind = NULL;
|
||||
if (condexec & 0xf) {
|
||||
dc->condexec_mask = (condexec & 0xf) << 1;
|
||||
dc->condexec_cond = condexec >> 4;
|
||||
|
@ -9459,13 +9491,17 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
|
|||
* fields here.
|
||||
*/
|
||||
uint32_t condexec_bits;
|
||||
target_ulong pc_arg = dc->base.pc_next;
|
||||
|
||||
if (TARGET_TB_PCREL) {
|
||||
pc_arg &= ~TARGET_PAGE_MASK;
|
||||
}
|
||||
if (dc->eci) {
|
||||
condexec_bits = dc->eci << 4;
|
||||
} else {
|
||||
condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
|
||||
}
|
||||
tcg_gen_insn_start(dc->base.pc_next, condexec_bits, 0);
|
||||
tcg_gen_insn_start(pc_arg, condexec_bits, 0);
|
||||
dc->insn_start = tcg_last_op();
|
||||
}
|
||||
|
||||
|
@@ -9508,8 +9544,11 @@ static bool arm_check_ss_active(DisasContext *dc)
 
 static void arm_post_translate_insn(DisasContext *dc)
 {
-    if (dc->condjmp && !dc->base.is_jmp) {
-        gen_set_label(dc->condlabel);
+    if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
+        if (dc->pc_save != dc->condlabel.pc_save) {
+            gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
+        }
+        gen_set_label(dc->condlabel.label);
         dc->condjmp = 0;
     }
     translator_loop_temp_check(&dc->base);
@@ -9612,6 +9651,9 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     uint32_t pc = dc->base.pc_next;
     uint32_t insn;
     bool is_16bit;
+    /* TCG op to rewind to if this turns out to be an invalid ECI state */
+    TCGOp *insn_eci_rewind = NULL;
+    target_ulong insn_eci_pc_save = -1;
 
     /* Misaligned thumb PC is architecturally impossible. */
     assert((dc->base.pc_next & 1) == 0);
@@ -9638,7 +9680,7 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
          * Illegal execution state. This has priority over BTI
          * exceptions, but comes after instruction abort exceptions.
          */
-        gen_exception_insn(dc, dc->pc_curr, EXCP_UDEF, syn_illegalstate());
+        gen_exception_insn(dc, 0, EXCP_UDEF, syn_illegalstate());
         return;
     }
@@ -9673,7 +9715,8 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
          * insn" case. We will rewind to the marker (ie throwing away
          * all the generated code) and instead emit "take exception".
          */
-        dc->insn_eci_rewind = tcg_last_op();
+        insn_eci_rewind = tcg_last_op();
+        insn_eci_pc_save = dc->pc_save;
     }
 
     if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
@@ -9709,10 +9752,10 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
             * Insn wasn't valid for ECI/ICI at all: undo what we
             * just generated and instead emit an exception
             */
-            tcg_remove_ops_after(dc->insn_eci_rewind);
+            tcg_remove_ops_after(insn_eci_rewind);
+            dc->pc_save = insn_eci_pc_save;
             dc->condjmp = 0;
-            gen_exception_insn(dc, dc->pc_curr, EXCP_INVSTATE,
-                               syn_uncategorized());
+            gen_exception_insn(dc, 0, EXCP_INVSTATE, syn_uncategorized());
     }
 
     arm_post_translate_insn(dc);
@@ -9772,7 +9815,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         case DISAS_TOO_MANY:
         case DISAS_UPDATE_EXIT:
         case DISAS_UPDATE_NOCHAIN:
-            gen_set_pc_im(dc, dc->base.pc_next);
+            gen_update_pc(dc, curr_insn_len(dc));
             /* fall through */
         default:
             /* FIXME: Single stepping a WFI insn will not halt the CPU. */
@@ -9793,16 +9836,16 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     switch (dc->base.is_jmp) {
     case DISAS_NEXT:
     case DISAS_TOO_MANY:
-        gen_goto_tb(dc, 1, dc->base.pc_next);
+        gen_goto_tb(dc, 1, curr_insn_len(dc));
         break;
     case DISAS_UPDATE_NOCHAIN:
-        gen_set_pc_im(dc, dc->base.pc_next);
+        gen_update_pc(dc, curr_insn_len(dc));
         /* fall through */
     case DISAS_JUMP:
         gen_goto_ptr();
         break;
     case DISAS_UPDATE_EXIT:
-        gen_set_pc_im(dc, dc->base.pc_next);
+        gen_update_pc(dc, curr_insn_len(dc));
         /* fall through */
     default:
         /* indicate that the hash table must be used to find the next TB */
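
gen_goto_tb likewise takes a displacement now; the absolute destination is still computed to decide whether direct TB chaining is allowed. A sketch of its post-series shape, for orientation only:

    /* Sketch: chain directly to the next TB when permitted, else exit
     * via the jump cache.  Under PCREL the PC must be brought up to date
     * before entering the linked TB, since that TB can only apply relative
     * updates; without PCREL the update can be deferred to the unlinked path.
     */
    static void gen_goto_tb(DisasContext *s, int n, target_long diff)
    {
        if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
            if (TARGET_TB_PCREL) {
                gen_update_pc(s, diff);
                tcg_gen_goto_tb(n);
            } else {
                tcg_gen_goto_tb(n);
                gen_update_pc(s, diff);
            }
            tcg_gen_exit_tb(s->base.tb, n);
        } else {
            gen_update_pc(s, diff);
            gen_goto_ptr();
        }
        s->base.is_jmp = DISAS_NORETURN;
    }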
@@ -9812,8 +9855,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         /* nothing more to generate */
         break;
     case DISAS_WFI:
-        gen_helper_wfi(cpu_env,
-                       tcg_constant_i32(dc->base.pc_next - dc->pc_curr));
+        gen_helper_wfi(cpu_env, tcg_constant_i32(curr_insn_len(dc)));
         /*
          * The helper doesn't necessarily throw an exception, but we
          * must go back to the main loop to check for interrupts anyway.
@@ -9840,13 +9882,13 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
 
     if (dc->condjmp) {
         /* "Condition failed" instruction codepath for the branch/trap insn */
-        gen_set_label(dc->condlabel);
+        set_disas_label(dc, dc->condlabel);
         gen_set_condexec(dc);
         if (unlikely(dc->ss_active)) {
-            gen_set_pc_im(dc, dc->base.pc_next);
+            gen_update_pc(dc, curr_insn_len(dc));
             gen_singlestep_exception(dc);
         } else {
-            gen_goto_tb(dc, 1, dc->base.pc_next);
+            gen_goto_tb(dc, 1, curr_insn_len(dc));
         }
     }
 }
@@ -9902,11 +9944,19 @@ void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                           target_ulong *data)
 {
     if (is_a64(env)) {
-        env->pc = data[0];
+        if (TARGET_TB_PCREL) {
+            env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
+        } else {
+            env->pc = data[0];
+        }
         env->condexec_bits = 0;
         env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
     } else {
-        env->regs[15] = data[0];
+        if (TARGET_TB_PCREL) {
+            env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
+        } else {
+            env->regs[15] = data[0];
+        }
         env->condexec_bits = data[1];
         env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
     }
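
Because arm_tr_insn_start above records only the page offset of each instruction when TARGET_TB_PCREL is set, the full PC is rebuilt here from the page base of the PC the CPU was actually executing at, which by construction lies on the same page. A self-contained illustration of that round trip, with made-up addresses and a 4 KiB page size standing in for TARGET_PAGE_SIZE:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* 4 KiB pages: high bits select the page, low 12 bits the offset. */
        uint64_t page_mask = ~(uint64_t)0xfff;   /* stand-in for TARGET_PAGE_MASK */
        uint64_t insn_pc = 0xaaaacafe1234;       /* PC recorded at insn_start time */
        uint64_t stored = insn_pc & ~page_mask;  /* only 0x234 is stored in the TB */
        uint64_t fault_pc = 0xaaaacafe1abc;      /* env->pc when state is restored */
        uint64_t restored = (fault_pc & page_mask) | stored;

        assert(restored == insn_pc);             /* full PC recovered */
        return 0;
    }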
diff --git a/target/arm/translate.h b/target/arm/translate.h
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -6,18 +6,42 @@
 
 /* internal defines */
 
+/*
+ * Save pc_save across a branch, so that we may restore the value from
+ * before the branch at the point the label is emitted.
+ */
+typedef struct DisasLabel {
+    TCGLabel *label;
+    target_ulong pc_save;
+} DisasLabel;
+
 typedef struct DisasContext {
     DisasContextBase base;
     const ARMISARegisters *isar;
 
     /* The address of the current instruction being translated. */
     target_ulong pc_curr;
+    /*
+     * For TARGET_TB_PCREL, the full value of cpu_pc is not known
+     * (although the page offset is known).  For convenience, the
+     * translation loop uses the full virtual address that triggered
+     * the translation, from base.pc_start through pc_curr.
+     * For efficiency, we do not update cpu_pc for every instruction.
+     * Instead, pc_save has the value of pc_curr at the time of the
+     * last update to cpu_pc, which allows us to compute the addend
+     * needed to bring cpu_pc current: pc_curr - pc_save.
+     * If cpu_pc now contains the destination of an indirect branch,
+     * pc_save contains -1 to indicate that relative updates are no
+     * longer possible.
+     */
+    target_ulong pc_save;
     target_ulong page_start;
     uint32_t insn;
     /* Nonzero if this instruction has been conditionally skipped. */
     int condjmp;
     /* The label that will be jumped to when the instruction is skipped. */
-    TCGLabel *condlabel;
+    DisasLabel condlabel;
     /* Thumb-2 conditional execution bits. */
     int condexec_mask;
     int condexec_cond;
@@ -28,8 +52,6 @@ typedef struct DisasContext {
      * after decode (ie after any UNDEF checks)
      */
     bool eci_handled;
-    /* TCG op to rewind to if this turns out to be an invalid ECI state */
-    TCGOp *insn_eci_rewind;
     int sctlr_b;
     MemOp be_data;
 #if !defined(CONFIG_USER_ONLY)
@@ -226,6 +248,11 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
     s->insn_start = NULL;
 }
 
+static inline int curr_insn_len(DisasContext *s)
+{
+    return s->base.pc_next - s->pc_curr;
+}
+
 /* is_jmp field values */
 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
 /* CPU state was modified dynamically; exit to main loop for interrupts. */
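
curr_insn_len works because base.pc_next has already been advanced past the instruction being decoded, so it returns 2 for a 16-bit Thumb insn and 4 otherwise; the gen_update_pc(s, curr_insn_len(s)) pattern seen throughout this pull therefore points cpu_pc at the following instruction. A hypothetical trans_ function showing the pattern (trans_EXAMPLE and the values in the comments are illustrative, not from the patch):

    /* Hypothetical decoder callback, for illustration only. */
    static bool trans_EXAMPLE(DisasContext *s, arg_EXAMPLE *a)
    {
        /* For a 16-bit Thumb insn at 0x8000: s->pc_curr == 0x8000 and
         * s->base.pc_next == 0x8002, so curr_insn_len(s) == 2 and the
         * call below leaves cpu_pc (and pc_save) at 0x8002.
         */
        gen_update_pc(s, curr_insn_len(s));
        s->base.is_jmp = DISAS_JUMP;
        return true;
    }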
@@ -249,7 +276,7 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
  * For instructions which want an immediate exit to the main loop, as opposed
  * to attempting to use lookup_and_goto_ptr.  Unlike DISAS_UPDATE_EXIT, this
  * doesn't write the PC on exiting the translation loop so you need to ensure
- * something (gen_a64_set_pc_im or runtime helper) has done so before we reach
+ * something (gen_a64_update_pc or runtime helper) has done so before we reach
  * return from cpu_tb_exec.
  */
 #define DISAS_EXIT      DISAS_TARGET_9
@@ -258,14 +285,14 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
 
 #ifdef TARGET_AARCH64
 void a64_translate_init(void);
-void gen_a64_set_pc_im(uint64_t val);
+void gen_a64_update_pc(DisasContext *s, target_long diff);
 extern const TranslatorOps aarch64_translator_ops;
 #else
 static inline void a64_translate_init(void)
 {
 }
 
-static inline void gen_a64_set_pc_im(uint64_t val)
+static inline void gen_a64_update_pc(DisasContext *s, target_long diff)
 {
 }
 #endif
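
The aarch64 definition (in translate-a64.c, not shown in this excerpt) mirrors the aarch32 helper sketched earlier, operating on the 64-bit cpu_pc. A sketch for context:

    /* Sketch of the aarch64 counterpart (64-bit cpu_pc). */
    static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
    {
        assert(s->pc_save != -1);
        if (TARGET_TB_PCREL) {
            tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
        } else {
            tcg_gen_movi_i64(dest, s->pc_curr + diff);
        }
    }

    void gen_a64_update_pc(DisasContext *s, target_long diff)
    {
        gen_pc_plus_diff(s, cpu_pc, diff);
        s->pc_save = s->pc_curr + diff;
    }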
@@ -276,9 +303,10 @@ void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
 void arm_gen_test_cc(int cc, TCGLabel *label);
 MemOp pow2_align(unsigned i);
 void unallocated_encoding(DisasContext *s);
-void gen_exception_insn_el(DisasContext *s, uint64_t pc, int excp,
+void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                            uint32_t syn, uint32_t target_el);
-void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syn);
+void gen_exception_insn(DisasContext *s, target_long pc_diff,
+                        int excp, uint32_t syn);
 
 /* Return state of Alternate Half-precision flag, caller frees result */
 static inline TCGv_i32 get_ahp_flag(void)
@@ -560,6 +588,28 @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
  */
 uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
 
+/*
+ * gen_disas_label:
+ * Create a label and cache a copy of pc_save.
+ */
+static inline DisasLabel gen_disas_label(DisasContext *s)
+{
+    return (DisasLabel){
+        .label = gen_new_label(),
+        .pc_save = s->pc_save,
+    };
+}
+
+/*
+ * set_disas_label:
+ * Emit a label and restore the cached copy of pc_save.
+ */
+static inline void set_disas_label(DisasContext *s, DisasLabel l)
+{
+    gen_set_label(l.label);
+    s->pc_save = l.pc_save;
+}
+
 /*
  * Helpers for implementing sets of trans_* functions.
  * Defer the implementation of NAME to FUNC, with optional extra arguments.
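
The gen_disas_label/set_disas_label pair is what keeps condition-skip labels correct under PCREL: whoever creates a label snapshots pc_save, and whoever emits it restores that snapshot, so both code paths agree on how far cpu_pc lags behind the translator. The aarch32 consumer, arm_gen_condlabel, as updated by this series (sketch):

    /* Sketch: allocate the condition-skip label for the current insn,
     * capturing pc_save so set_disas_label() can restore it later.
     */
    static void arm_gen_condlabel(DisasContext *s)
    {
        if (!s->condjmp) {
            s->condlabel = gen_disas_label(s);
            s->condjmp = 1;
        }
    }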