Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200207' into staging

target-arm queue:
 * monitor: fix query-cpu-model-expansion crash when using machine type none
 * Support emulation of the ARMv8.1-VHE architecture feature
 * bcm2835_dma: fix bugs in TD mode handling
 * docs/arm-cpu-features: Make kvm-no-adjvtime comment clearer
 * stellaris, stm32f2xx_timer, armv7m_systick: fix minor memory leaks

# gpg: Signature made Fri 07 Feb 2020 14:32:28 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20200207: (48 commits)
  stellaris: delay timer_new to avoid memleaks
  stm32f2xx_timer: delay timer_new to avoid memleaks
  armv7m_systick: delay timer_new to avoid memleaks
  docs/arm-cpu-features: Make kvm-no-adjvtime comment clearer
  bcm2835_dma: Re-initialize xlen in TD mode
  bcm2835_dma: Fix the ylen loop in TD mode
  target/arm: Raise only one interrupt in arm_cpu_exec_interrupt
  target/arm: Use bool for unmasked in arm_excp_unmasked
  target/arm: Pass more cpu state to arm_excp_unmasked
  target/arm: Move arm_excp_unmasked to cpu.c
  target/arm: Enable ARMv8.1-VHE in -cpu max
  target/arm: Update arm_cpu_do_interrupt_aarch64 for VHE
  target/arm: Update get_a64_user_mem_index for VHE
  target/arm: check TGE and E2H flags for EL0 pauth traps
  target/arm: Update {fp,sve}_exception_el for VHE
  target/arm: Update arm_phys_excp_target_el for TGE
  target/arm: Flush tlbs for E2&0 translation regime
  target/arm: Flush tlb for ASID changes in EL2&0 translation regime
  target/arm: Add VHE timer register redirection and aliasing
  target/arm: Add VHE system register redirection and aliasing
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 93c86fff53
Peter Maydell <peter.maydell@linaro.org>, 2020-02-07 18:02:52 +00:00
19 changed files with 1415 additions and 692 deletions

@@ -185,7 +185,7 @@ the list of KVM VCPU features and their descriptions.
   kvm-no-adjvtime          By default kvm-no-adjvtime is disabled.  This
                            means that by default the virtual time
-                           adjustment is enabled (vtime is *not not*
+                           adjustment is enabled (vtime is not *not*
                            adjusted).
 
                            When virtual time adjustment is enabled each


@@ -347,11 +347,15 @@ static void stellaris_gptm_init(Object *obj)
     sysbus_init_mmio(sbd, &s->iomem);
 
     s->opaque[0] = s->opaque[1] = s;
+}
 
+static void stellaris_gptm_realize(DeviceState *dev, Error **errp)
+{
+    gptm_state *s = STELLARIS_GPTM(dev);
     s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[0]);
     s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]);
 }
 
 /* System controller.  */
 
 typedef struct {
@@ -1536,6 +1540,7 @@ static void stellaris_gptm_class_init(ObjectClass *klass, void *data)
     DeviceClass *dc = DEVICE_CLASS(klass);
 
     dc->vmsd = &vmstate_stellaris_gptm;
+    dc->realize = stellaris_gptm_realize;
 }
 
 static const TypeInfo stellaris_gptm_info = {

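The three timer memleak fixes in this queue (this one plus the armv7m_systick and stm32f2xx_timer diffs below) follow one pattern: instance_init runs not only for real devices but also during QOM type introspection (object_new() immediately followed by object_unref()), so a QEMUTimer allocated there without a matching instance_finalize is leaked. Moving the timer_new_ns() call into the DeviceClass realize hook, which introspection never invokes, avoids the leak. A minimal sketch of the pattern — the MyDevState/MYDEV/mydev names are hypothetical, not from this series:

    #include "qemu/osdep.h"
    #include "qemu/timer.h"
    #include "hw/qdev-core.h"

    typedef struct MyDevState {
        DeviceState parent_obj;
        QEMUTimer *timer;
    } MyDevState;

    #define MYDEV(obj) OBJECT_CHECK(MyDevState, (obj), "mydev")

    static void mydev_tick(void *opaque)
    {
        /* timer callback */
    }

    static void mydev_instance_init(Object *obj)
    {
        /* Side-effect-free setup only: this also runs (and is
         * immediately torn down) when QMP introspects the type. */
    }

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        MyDevState *s = MYDEV(dev);

        /* Allocation deferred to realize, so object_new()/object_unref()
         * during introspection no longer leaks a QEMUTimer. */
        s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, mydev_tick, s);
    }

    static void mydev_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        dc->realize = mydev_realize;
    }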

@@ -54,7 +54,7 @@
 static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
 {
     BCM2835DMAChan *ch = &s->chan[c];
-    uint32_t data, xlen, ylen;
+    uint32_t data, xlen, xlen_td, ylen;
     int16_t dst_stride, src_stride;
 
     if (!(s->enable & (1 << c))) {
@@ -70,18 +70,19 @@ static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
         ch->stride = ldl_le_phys(&s->dma_as, ch->conblk_ad + 16);
         ch->nextconbk = ldl_le_phys(&s->dma_as, ch->conblk_ad + 20);
 
+        ylen = 1;
         if (ch->ti & BCM2708_DMA_TDMODE) {
             /* 2D transfer mode */
-            ylen = (ch->txfr_len >> 16) & 0x3fff;
+            ylen += (ch->txfr_len >> 16) & 0x3fff;
             xlen = ch->txfr_len & 0xffff;
             dst_stride = ch->stride >> 16;
             src_stride = ch->stride & 0xffff;
         } else {
-            ylen = 1;
             xlen = ch->txfr_len;
             dst_stride = 0;
             src_stride = 0;
         }
+        xlen_td = xlen;
 
         while (ylen != 0) {
             /* Normal transfer mode */
@@ -117,6 +118,7 @@ static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
             if (--ylen != 0) {
                 ch->source_ad += src_stride;
                 ch->dest_ad += dst_stride;
+                xlen = xlen_td;
             }
         }
         ch->cs |= BCM2708_DMA_END;

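The two bcm2835_dma fixes above are easiest to see outside the device model. Per this patch, YLENGTH in TD (2D) mode counts rows beyond the implicit first one (hence ylen = 1 then ylen += ...), and the per-row byte count must be reloaded for every row — that is what the saved xlen_td restores; before the fix, rows after the first started with the already-decremented xlen and transferred nothing. A standalone sketch of the corrected walk (names are mine, illustrative only):

    #include <stdint.h>
    #include <string.h>

    /* Corrected 2D walk: 'rows' includes the implicit first row, and the
     * per-row length is reset at the start of each row. The signed
     * strides are applied after each row's bytes have been copied. */
    static void td_copy(uint8_t *dst, const uint8_t *src, uint32_t txfr_len,
                        int16_t dst_stride, int16_t src_stride)
    {
        uint32_t rows = 1 + ((txfr_len >> 16) & 0x3fff); /* YLENGTH + 1 rows */
        uint32_t row_bytes = txfr_len & 0xffff;          /* XLENGTH */

        while (rows-- != 0) {
            memcpy(dst, src, row_bytes);   /* row_bytes re-read every row */
            src += row_bytes + src_stride;
            dst += row_bytes + dst_stride;
        }
    }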

@@ -216,6 +216,11 @@ static void systick_instance_init(Object *obj)
     memory_region_init_io(&s->iomem, obj, &systick_ops, s, "systick", 0xe0);
     sysbus_init_mmio(sbd, &s->iomem);
     sysbus_init_irq(sbd, &s->irq);
+}
+
+static void systick_realize(DeviceState *dev, Error **errp)
+{
+    SysTickState *s = SYSTICK(dev);
     s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, systick_timer_tick, s);
 }
 
@@ -238,6 +243,7 @@ static void systick_class_init(ObjectClass *klass, void *data)
 
     dc->vmsd = &vmstate_systick;
     dc->reset = systick_reset;
+    dc->realize = systick_realize;
 }
 
 static const TypeInfo armv7m_systick_info = {


@@ -314,7 +314,11 @@ static void stm32f2xx_timer_init(Object *obj)
     memory_region_init_io(&s->iomem, obj, &stm32f2xx_timer_ops, s,
                           "stm32f2xx_timer", 0x400);
     sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
+}
 
+static void stm32f2xx_timer_realize(DeviceState *dev, Error **errp)
+{
+    STM32F2XXTimerState *s = STM32F2XXTIMER(dev);
     s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, stm32f2xx_timer_interrupt, s);
 }
 
@@ -325,6 +329,7 @@ static void stm32f2xx_timer_class_init(ObjectClass *klass, void *data)
     dc->reset = stm32f2xx_timer_reset;
     device_class_set_props(dc, stm32f2xx_timer_properties);
     dc->vmsd = &vmstate_stm32f2xx_timer;
+    dc->realize = stm32f2xx_timer_realize;
 }
 
 static const TypeInfo stm32f2xx_timer_info = {


@@ -29,6 +29,6 @@
 # define TARGET_PAGE_BITS_MIN 10
 #endif
 
-#define NB_MMU_MODES 8
+#define NB_MMU_MODES 9
 
 #endif


@@ -76,6 +76,7 @@ void arm_gt_ptimer_cb(void *opaque);
 void arm_gt_vtimer_cb(void *opaque);
 void arm_gt_htimer_cb(void *opaque);
 void arm_gt_stimer_cb(void *opaque);
+void arm_gt_hvtimer_cb(void *opaque);
 
 #define ARM_AFF0_SHIFT 0
 #define ARM_AFF0_MASK  (0xFFULL << ARM_AFF0_SHIFT)


@@ -410,58 +410,173 @@ static void arm_cpu_reset(CPUState *s)
     arm_rebuild_hflags(env);
 }
 
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
+                                     unsigned int target_el,
+                                     unsigned int cur_el, bool secure,
+                                     uint64_t hcr_el2)
+{
+    CPUARMState *env = cs->env_ptr;
+    bool pstate_unmasked;
+    bool unmasked = false;
+
+    /*
+     * Don't take exceptions if they target a lower EL.
+     * This check should catch any exceptions that would not be taken
+     * but left pending.
+     */
+    if (cur_el > target_el) {
+        return false;
+    }
+
+    switch (excp_idx) {
+    case EXCP_FIQ:
+        pstate_unmasked = !(env->daif & PSTATE_F);
+        break;
+
+    case EXCP_IRQ:
+        pstate_unmasked = !(env->daif & PSTATE_I);
+        break;
+
+    case EXCP_VFIQ:
+        if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
+            /* VFIQs are only taken when hypervized and non-secure.  */
+            return false;
+        }
+        return !(env->daif & PSTATE_F);
+    case EXCP_VIRQ:
+        if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
+            /* VIRQs are only taken when hypervized and non-secure.  */
+            return false;
+        }
+        return !(env->daif & PSTATE_I);
+    default:
+        g_assert_not_reached();
+    }
+
+    /*
+     * Use the target EL, current execution state and SCR/HCR settings to
+     * determine whether the corresponding CPSR bit is used to mask the
+     * interrupt.
+     */
+    if ((target_el > cur_el) && (target_el != 1)) {
+        /* Exceptions targeting a higher EL may not be maskable */
+        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+            /*
+             * 64-bit masking rules are simple: exceptions to EL3
+             * can't be masked, and exceptions to EL2 can only be
+             * masked from Secure state. The HCR and SCR settings
+             * don't affect the masking logic, only the interrupt routing.
+             */
+            if (target_el == 3 || !secure) {
+                unmasked = true;
+            }
+        } else {
+            /*
+             * The old 32-bit-only environment has a more complicated
+             * masking setup. HCR and SCR bits not only affect interrupt
+             * routing but also change the behaviour of masking.
+             */
+            bool hcr, scr;
+
+            switch (excp_idx) {
+            case EXCP_FIQ:
+                /*
+                 * If FIQs are routed to EL3 or EL2 then there are cases where
+                 * we override the CPSR.F in determining if the exception is
+                 * masked or not. If neither of these are set then we fall back
+                 * to the CPSR.F setting otherwise we further assess the state
+                 * below.
+                 */
+                hcr = hcr_el2 & HCR_FMO;
+                scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+                /*
+                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
+                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
+                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
+                 * when non-secure but only when FIQs are only routed to EL3.
+                 */
+                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+                break;
+            case EXCP_IRQ:
+                /*
+                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
+                 * we may override the CPSR.I masking when in non-secure state.
+                 * The SCR.IRQ setting has already been taken into consideration
+                 * when setting the target EL, so it does not have a further
+                 * affect here.
+                 */
+                hcr = hcr_el2 & HCR_IMO;
+                scr = false;
+                break;
+            default:
+                g_assert_not_reached();
+            }
+
+            if ((scr || hcr) && !secure) {
+                unmasked = true;
+            }
+        }
+    }
+
+    /*
+     * The PSTATE bits only mask the interrupt if we have not overriden the
+     * ability above.
+     */
+    return unmasked || pstate_unmasked;
+}
+
 bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
     CPUARMState *env = cs->env_ptr;
     uint32_t cur_el = arm_current_el(env);
     bool secure = arm_is_secure(env);
+    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
     uint32_t target_el;
     uint32_t excp_idx;
-    bool ret = false;
+
+    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
 
     if (interrupt_request & CPU_INTERRUPT_FIQ) {
         excp_idx = EXCP_FIQ;
         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
-        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
-            cs->exception_index = excp_idx;
-            env->exception.target_el = target_el;
-            cc->do_interrupt(cs);
-            ret = true;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
         }
     }
     if (interrupt_request & CPU_INTERRUPT_HARD) {
         excp_idx = EXCP_IRQ;
         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
-        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
-            cs->exception_index = excp_idx;
-            env->exception.target_el = target_el;
-            cc->do_interrupt(cs);
-            ret = true;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
         }
     }
     if (interrupt_request & CPU_INTERRUPT_VIRQ) {
         excp_idx = EXCP_VIRQ;
         target_el = 1;
-        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
-            cs->exception_index = excp_idx;
-            env->exception.target_el = target_el;
-            cc->do_interrupt(cs);
-            ret = true;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
         }
     }
     if (interrupt_request & CPU_INTERRUPT_VFIQ) {
         excp_idx = EXCP_VFIQ;
         target_el = 1;
-        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
-            cs->exception_index = excp_idx;
-            env->exception.target_el = target_el;
-            cc->do_interrupt(cs);
-            ret = true;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
         }
     }
+    return false;
 
-    return ret;
+ found:
+    cs->exception_index = excp_idx;
+    env->exception.target_el = target_el;
+    cc->do_interrupt(cs);
+    return true;
 }
 
 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
@@ -1272,7 +1387,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         }
     }
 
     {
         uint64_t scale;
@@ -1295,6 +1409,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
                                                arm_gt_htimer_cb, cpu);
         cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                               arm_gt_stimer_cb, cpu);
+        cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+                                                  arm_gt_hvtimer_cb, cpu);
     }
 #endif

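Two things happen in the arm_cpu_exec_interrupt() rewrite above: the caller now computes arm_hcr_el2_eff() once and passes it down along with cur_el/secure, and four duplicated delivery blocks collapse into a single exit path behind the found: label. The control-flow shape, reduced to a toy (all names here are illustrative, not from the patch):

    #include <stdbool.h>
    #include <stdio.h>

    enum { EXCP_A = 1, EXCP_B = 2 };

    static void deliver(int excp)      /* stand-in for cc->do_interrupt() */
    {
        printf("taking exception %d\n", excp);
    }

    /* One delivery site instead of four copies of the same three lines. */
    static bool dispatch(unsigned pending)
    {
        int excp;

        if (pending & 1u) { excp = EXCP_A; goto found; }
        if (pending & 2u) { excp = EXCP_B; goto found; }
        return false;

     found:
        deliver(excp);
        return true;
    }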

@@ -144,11 +144,12 @@ typedef struct ARMGenericTimer {
     uint64_t ctl; /* Timer Control register */
 } ARMGenericTimer;
 
-#define GTIMER_PHYS 0
-#define GTIMER_VIRT 1
-#define GTIMER_HYP  2
-#define GTIMER_SEC  3
-#define NUM_GTIMERS 4
+#define GTIMER_PHYS     0
+#define GTIMER_VIRT     1
+#define GTIMER_HYP      2
+#define GTIMER_SEC      3
+#define GTIMER_HYPVIRT  4
+#define NUM_GTIMERS     5
 
 typedef struct {
     uint64_t raw_tcr;
@@ -1424,13 +1425,6 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 #define HCR_ATA       (1ULL << 56)
 #define HCR_DCT       (1ULL << 57)
 
-/*
- * When we actually implement ARMv8.1-VHE we should add HCR_E2H to
- * HCR_MASK and then clear it again if the feature bit is not set in
- * hcr_write().
- */
-#define HCR_MASK ((1ULL << 34) - 1)
-
 #define SCR_NS                (1U << 0)
 #define SCR_IRQ               (1U << 1)
 #define SCR_FIQ               (1U << 2)
@@ -2582,6 +2576,19 @@ struct ARMCPRegInfo {
      * fieldoffset is 0 then no reset will be done.
      */
     CPResetFn *resetfn;
+
+    /*
+     * "Original" writefn and readfn.
+     * For ARMv8.1-VHE register aliases, we overwrite the read/write
+     * accessor functions of various EL1/EL0 to perform the runtime
+     * check for which sysreg should actually be modified, and then
+     * forwards the operation.  Before overwriting the accessors,
+     * the original function is copied here, so that accesses that
+     * really do go to the EL1/EL0 version proceed normally.
+     * (The corresponding EL2 register is linked via opaque.)
+     */
+    CPReadFn *orig_readfn;
+    CPWriteFn *orig_writefn;
 };
 
 /* Macros which are lvalues for the field in CPUARMState for the
@@ -2702,117 +2709,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
 #define ARM_CPUID_TI915T      0x54029152
 #define ARM_CPUID_TI925T      0x54029252
 
-static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
-                                     unsigned int target_el)
-{
-    CPUARMState *env = cs->env_ptr;
-    unsigned int cur_el = arm_current_el(env);
-    bool secure = arm_is_secure(env);
-    bool pstate_unmasked;
-    int8_t unmasked = 0;
-    uint64_t hcr_el2;
-
-    /* Don't take exceptions if they target a lower EL.
-     * This check should catch any exceptions that would not be taken but left
-     * pending.
-     */
-    if (cur_el > target_el) {
-        return false;
-    }
-
-    hcr_el2 = arm_hcr_el2_eff(env);
-
-    switch (excp_idx) {
-    case EXCP_FIQ:
-        pstate_unmasked = !(env->daif & PSTATE_F);
-        break;
-
-    case EXCP_IRQ:
-        pstate_unmasked = !(env->daif & PSTATE_I);
-        break;
-
-    case EXCP_VFIQ:
-        if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
-            /* VFIQs are only taken when hypervized and non-secure.  */
-            return false;
-        }
-        return !(env->daif & PSTATE_F);
-    case EXCP_VIRQ:
-        if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
-            /* VIRQs are only taken when hypervized and non-secure.  */
-            return false;
-        }
-        return !(env->daif & PSTATE_I);
-    default:
-        g_assert_not_reached();
-    }
-
-    /* Use the target EL, current execution state and SCR/HCR settings to
-     * determine whether the corresponding CPSR bit is used to mask the
-     * interrupt.
-     */
-    if ((target_el > cur_el) && (target_el != 1)) {
-        /* Exceptions targeting a higher EL may not be maskable */
-        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
-            /* 64-bit masking rules are simple: exceptions to EL3
-             * can't be masked, and exceptions to EL2 can only be
-             * masked from Secure state. The HCR and SCR settings
-             * don't affect the masking logic, only the interrupt routing.
-             */
-            if (target_el == 3 || !secure) {
-                unmasked = 1;
-            }
-        } else {
-            /* The old 32-bit-only environment has a more complicated
-             * masking setup. HCR and SCR bits not only affect interrupt
-             * routing but also change the behaviour of masking.
-             */
-            bool hcr, scr;
-
-            switch (excp_idx) {
-            case EXCP_FIQ:
-                /* If FIQs are routed to EL3 or EL2 then there are cases where
-                 * we override the CPSR.F in determining if the exception is
-                 * masked or not. If neither of these are set then we fall back
-                 * to the CPSR.F setting otherwise we further assess the state
-                 * below.
-                 */
-                hcr = hcr_el2 & HCR_FMO;
-                scr = (env->cp15.scr_el3 & SCR_FIQ);
-
-                /* When EL3 is 32-bit, the SCR.FW bit controls whether the
-                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
-                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
-                 * when non-secure but only when FIQs are only routed to EL3.
-                 */
-                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
-                break;
-            case EXCP_IRQ:
-                /* When EL3 execution state is 32-bit, if HCR.IMO is set then
-                 * we may override the CPSR.I masking when in non-secure state.
-                 * The SCR.IRQ setting has already been taken into consideration
-                 * when setting the target EL, so it does not have a further
-                 * affect here.
-                 */
-                hcr = hcr_el2 & HCR_IMO;
-                scr = false;
-                break;
-            default:
-                g_assert_not_reached();
-            }
-
-            if ((scr || hcr) && !secure) {
-                unmasked = 1;
-            }
-        }
-    }
-
-    /* The PSTATE bits only mask the interrupt if we have not overriden the
-     * ability above.
-     */
-    return unmasked || pstate_unmasked;
-}
-
 #define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
 #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
 #define CPU_RESOLVING_TYPE TYPE_ARM_CPU
@@ -2826,18 +2722,21 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  *  + NonSecure EL1 & 0 stage 1
  *  + NonSecure EL1 & 0 stage 2
  *  + NonSecure EL2
- *  + Secure EL1 & EL0
+ *  + NonSecure EL2 & 0   (ARMv8.1-VHE)
+ *  + Secure EL1 & 0
  *  + Secure EL3
  * If EL3 is 32-bit:
  *  + NonSecure PL1 & 0 stage 1
  *  + NonSecure PL1 & 0 stage 2
  *  + NonSecure PL2
- *  + Secure PL0 & PL1
+ *  + Secure PL0
+ *  + Secure PL1
  * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
  *
  * For QEMU, an mmu_idx is not quite the same as a translation regime because:
- * 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
- *    may differ in access permissions even if the VA->PA map is the same
+ * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
+ *    because they may differ in access permissions even if the VA->PA map is
+ *    the same
  * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
  *    translation, which means that we have one mmu_idx that deals with two
  *    concatenated translation regimes [this sort of combined s1+2 TLB is
@@ -2849,19 +2748,23 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
  *    translation regimes, because they map reasonably well to each other
  *    and they can't both be active at the same time.
- * This gives us the following list of mmu_idx values:
+ * 5. we want to be able to use the TLB for accesses done as part of a
+ *    stage1 page table walk, rather than having to walk the stage2 page
+ *    table over and over.
  *
- * NS EL0 (aka NS PL0) stage 1+2
- * NS EL1 (aka NS PL1) stage 1+2
+ * This gives us the following list of cases:
+ *
+ * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * NS EL0 EL2&0
+ * NS EL2 EL2&0
  * NS EL2 (aka NS PL2)
+ * S EL0 EL1&0 (aka S PL0)
+ * S EL1 EL1&0 (not used if EL3 is 32 bit)
  * S EL3 (aka S PL1)
- * S EL0 (aka S PL0)
- * S EL1 (not used if EL3 is 32 bit)
- * NS EL0+1 stage 2
+ * NS EL1&0 stage 2
  *
- * (The last of these is an mmu_idx because we want to be able to use the TLB
- * for the accesses done as part of a stage 1 page table walk, rather than
- * having to walk the stage 2 page table over and over.)
+ * for a total of 9 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -2899,107 +2802,89 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * For M profile we arrange them to have a bit for priv, a bit for negpri
  * and a bit for secure.
  */
 #define ARM_MMU_IDX_A     0x10  /* A profile */
 #define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
 #define ARM_MMU_IDX_M     0x40  /* M profile */
 
-/* meanings of the bits for M profile mmu idx values */
+/* Meanings of the bits for M profile mmu idx values */
 #define ARM_MMU_IDX_M_PRIV   0x1
 #define ARM_MMU_IDX_M_NEGPRI 0x2
-#define ARM_MMU_IDX_M_S      0x4
+#define ARM_MMU_IDX_M_S      0x4  /* Secure */
 
-#define ARM_MMU_IDX_TYPE_MASK (~0x7)
-#define ARM_MMU_IDX_COREIDX_MASK 0x7
+#define ARM_MMU_IDX_TYPE_MASK \
+    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
+#define ARM_MMU_IDX_COREIDX_MASK 0xf
 
 typedef enum ARMMMUIdx {
-    ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
-    ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MUserNegPri = 2 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MPrivNegPri = 3 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSUser = 4 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSPriv = 5 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSUserNegPri = 6 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSPrivNegPri = 7 | ARM_MMU_IDX_M,
-    /* Indexes below here don't have TLBs and are used only for AT system
+    /*
+     * A-profile.
+     */
+    ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_E2 = 3 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_2 = 4 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_SE10_0 = 5 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE10_1 = 6 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A,
+
+    /*
+     * These are not allocated TLBs and are used only for AT system
      * instructions or for the first stage of an S12 page table walk.
      */
-    ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+
+    /*
+     * M-profile.
+     */
+    ARMMMUIdx_MUser = ARM_MMU_IDX_M,
+    ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
+    ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
+    ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
+    ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
+    ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
+    ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
+    ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
 } ARMMMUIdx;
 
-/* Bit macros for the core-mmu-index values for each index,
+/*
+ * Bit macros for the core-mmu-index values for each index,
  * for use when calling tlb_flush_by_mmuidx() and friends.
  */
+#define TO_CORE_BIT(NAME) \
+    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
+
 typedef enum ARMMMUIdxBit {
-    ARMMMUIdxBit_S12NSE0 = 1 << 0,
-    ARMMMUIdxBit_S12NSE1 = 1 << 1,
-    ARMMMUIdxBit_S1E2 = 1 << 2,
-    ARMMMUIdxBit_S1E3 = 1 << 3,
-    ARMMMUIdxBit_S1SE0 = 1 << 4,
-    ARMMMUIdxBit_S1SE1 = 1 << 5,
-    ARMMMUIdxBit_S2NS = 1 << 6,
-    ARMMMUIdxBit_MUser = 1 << 0,
-    ARMMMUIdxBit_MPriv = 1 << 1,
-    ARMMMUIdxBit_MUserNegPri = 1 << 2,
-    ARMMMUIdxBit_MPrivNegPri = 1 << 3,
-    ARMMMUIdxBit_MSUser = 1 << 4,
-    ARMMMUIdxBit_MSPriv = 1 << 5,
-    ARMMMUIdxBit_MSUserNegPri = 1 << 6,
-    ARMMMUIdxBit_MSPrivNegPri = 1 << 7,
+    TO_CORE_BIT(E10_0),
+    TO_CORE_BIT(E20_0),
+    TO_CORE_BIT(E10_1),
+    TO_CORE_BIT(E2),
+    TO_CORE_BIT(E20_2),
+    TO_CORE_BIT(SE10_0),
+    TO_CORE_BIT(SE10_1),
+    TO_CORE_BIT(SE3),
+    TO_CORE_BIT(Stage2),
+
+    TO_CORE_BIT(MUser),
+    TO_CORE_BIT(MPriv),
+    TO_CORE_BIT(MUserNegPri),
+    TO_CORE_BIT(MPrivNegPri),
+    TO_CORE_BIT(MSUser),
+    TO_CORE_BIT(MSPriv),
+    TO_CORE_BIT(MSUserNegPri),
+    TO_CORE_BIT(MSPrivNegPri),
 } ARMMMUIdxBit;
 
+#undef TO_CORE_BIT
+
 #define MMU_USER_IDX 0
 
-static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
-{
-    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
-}
-
-static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
-{
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        return mmu_idx | ARM_MMU_IDX_M;
-    } else {
-        return mmu_idx | ARM_MMU_IDX_A;
-    }
-}
-
-/* Return the exception level we're running at if this is our mmu_idx */
-static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
-    case ARM_MMU_IDX_A:
-        return mmu_idx & 3;
-    case ARM_MMU_IDX_M:
-        return mmu_idx & ARM_MMU_IDX_M_PRIV;
-    default:
-        g_assert_not_reached();
-    }
-}
-
-/*
- * Return the MMU index for a v7M CPU with all relevant information
- * manually specified.
- */
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri);
-
-/* Return the MMU index for a v7M CPU in the specified security and
- * privilege state.
- */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv);
-
-/* Return the MMU index for a v7M CPU in the specified security state */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
-
 /**
  * cpu_mmu_index:
  * @env: The cpu environment
@@ -3159,15 +3044,7 @@ static inline bool arm_sctlr_b(CPUARMState *env)
         (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
 }
 
-static inline uint64_t arm_sctlr(CPUARMState *env, int el)
-{
-    if (el == 0) {
-        /* FIXME: ARMv8.1-VHE S2 translation regime.  */
-        return env->cp15.sctlr_el[1];
-    } else {
-        return env->cp15.sctlr_el[el];
-    }
-}
+uint64_t arm_sctlr(CPUARMState *env, int el);
 
 static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                   bool sctlr_b)
@@ -3221,55 +3098,73 @@ typedef ARMCPU ArchCPU;
  * We put flags which are shared between 32 and 64 bit mode at the top
  * of the word, and flags which apply to only one mode at the bottom.
  *
+ *  31          20    18      14          9              0
+ * +--------------+-----+-----+----------+--------------+
+ * |              |     |   TBFLAG_A32   |              |
+ * |              |     +-----+----------+  TBFLAG_AM32 |
+ * |  TBFLAG_ANY  |           |TBFLAG_M32|              |
+ * |              |         +-+----------+--------------|
+ * |              |         |        TBFLAG_A64         |
+ * +--------------+---------+---------------------------+
+ *  31          20        15                            0
+ *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
 FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
-FIELD(TBFLAG_ANY, MMUIDX, 28, 3)
-FIELD(TBFLAG_ANY, SS_ACTIVE, 27, 1)
-FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)     /* Not cached. */
+FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
+FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1)     /* Not cached. */
+FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
+FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
 /* Target EL if we take a floating-point-disabled exception */
-FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
-FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
-/*
- * For A-profile only, target EL for debug exceptions.
- * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
- */
-FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
+FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
+/* For A-profile only, target EL for debug exceptions.  */
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
 
-/* Bit usage when in AArch32 state: */
-FIELD(TBFLAG_A32, THUMB, 0, 1)          /* Not cached. */
-FIELD(TBFLAG_A32, VECLEN, 1, 3)         /* Not cached. */
-FIELD(TBFLAG_A32, VECSTRIDE, 4, 2)      /* Not cached. */
+/*
+ * Bit usage when in AArch32 state, both A- and M-profile.
+ */
+FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)      /* Not cached. */
+FIELD(TBFLAG_AM32, THUMB, 8, 1)         /* Not cached. */
+
+/*
+ * Bit usage when in AArch32 state, for A-profile only.
+ */
+FIELD(TBFLAG_A32, VECLEN, 9, 3)         /* Not cached. */
+FIELD(TBFLAG_A32, VECSTRIDE, 12, 2)     /* Not cached. */
 /*
  * We store the bottom two bits of the CPAR as TB flags and handle
  * checks on the other bits at runtime. This shares the same bits as
 * VECSTRIDE, which is OK as no XScale CPU has VFP.
  * Not cached, because VECLEN+VECSTRIDE are not cached.
  */
-FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
+FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
+FIELD(TBFLAG_A32, VFPEN, 14, 1)         /* Partially cached, minus FPEXC. */
+FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
 /*
  * Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
  */
-FIELD(TBFLAG_A32, NS, 6, 1)
-FIELD(TBFLAG_A32, VFPEN, 7, 1)          /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, CONDEXEC, 8, 8)       /* Not cached. */
-FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 17, 1)
-/* For M profile only, set if FPCCR.LSPACT is set */
-FIELD(TBFLAG_A32, LSPACT, 18, 1)        /* Not cached. */
-/* For M profile only, set if we must create a new FP context */
-FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1) /* Not cached. */
-/* For M profile only, set if FPCCR.S does not match current security state */
-FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1) /* Not cached. */
-/* For M profile only, Handler (ie not Thread) mode */
-FIELD(TBFLAG_A32, HANDLER, 21, 1)
-/* For M profile only, whether we should generate stack-limit checks */
-FIELD(TBFLAG_A32, STACKCHECK, 22, 1)
+FIELD(TBFLAG_A32, NS, 17, 1)
 
-/* Bit usage when in AArch64 state */
+/*
+ * Bit usage when in AArch32 state, for M-profile only.
+ */
+/* Handler (ie not Thread) mode */
+FIELD(TBFLAG_M32, HANDLER, 9, 1)
+/* Whether we should generate stack-limit checks */
+FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
+/* Set if FPCCR.LSPACT is set */
+FIELD(TBFLAG_M32, LSPACT, 11, 1)        /* Not cached. */
+/* Set if we must create a new FP context */
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */
+/* Set if FPCCR.S does not match current security state */
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */
+
+/*
+ * Bit usage when in AArch64 state
+ */
 FIELD(TBFLAG_A64, TBII, 0, 2)
 FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
 FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
@@ -3277,6 +3172,7 @@ FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
 FIELD(TBFLAG_A64, BT, 9, 1)
 FIELD(TBFLAG_A64, BTYPE, 10, 2)         /* Not cached. */
 FIELD(TBFLAG_A64, TBID, 12, 2)
+FIELD(TBFLAG_A64, UNPRIV, 14, 1)
 
 static inline bool bswap_code(bool sctlr_b)
 {
@@ -3685,6 +3581,11 @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
 }
 
+static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
+}
+
 static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;

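One detail of the header rewrite above worth spelling out: TO_CORE_BIT keeps each ARMMMUIdxBit_* value mechanically in sync with its enumerator's core index. A preprocessor walk-through for one case (my arithmetic, using the values from the hunk above):

    #define ARM_MMU_IDX_A 0x10
    #define ARM_MMU_IDX_COREIDX_MASK 0xf

    #define TO_CORE_BIT(NAME) \
        ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)

    /* TO_CORE_BIT(E10_0) expands to:
     *   ARMMMUIdxBit_E10_0 = 1 << (ARMMMUIdx_E10_0 & ARM_MMU_IDX_COREIDX_MASK)
     * and since ARMMMUIdx_E10_0 == (0 | ARM_MMU_IDX_A) == 0x10, masking with
     * 0xf yields 0, so ARMMMUIdxBit_E10_0 == 1 << 0.  Likewise
     * ARMMMUIdxBit_Stage2 == 1 << 8 -- the ninth TLB-backed index, matching
     * the bump of NB_MMU_MODES from 8 to 9 in cpu-param.h. */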

@@ -672,6 +672,7 @@ static void aarch64_max_initfn(Object *obj)
         t = cpu->isar.id_aa64mmfr1;
         t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
         t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
+        t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
         cpu->isar.id_aa64mmfr1 = t;
 
         /* Replicate the same data to the 32-bit id registers.  */


@@ -20,6 +20,7 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
     int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
     int bt;
     uint32_t contextidr;
+    uint64_t hcr_el2;
 
     /*
      * Links to unimplemented or non-context aware breakpoints are
@@ -40,24 +41,44 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
     }
 
     bt = extract64(bcr, 20, 4);
-
-    /*
-     * We match the whole register even if this is AArch32 using the
-     * short descriptor format (in which case it holds both PROCID and ASID),
-     * since we don't implement the optional v7 context ID masking.
-     */
-    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
+    hcr_el2 = arm_hcr_el2_eff(env);
 
     switch (bt) {
     case 3: /* linked context ID match */
-        if (arm_current_el(env) > 1) {
-            /* Context matches never fire in EL2 or (AArch64) EL3 */
+        switch (arm_current_el(env)) {
+        default:
+            /* Context matches never fire in AArch64 EL3 */
             return false;
+        case 2:
+            if (!(hcr_el2 & HCR_E2H)) {
+                /* Context matches never fire in EL2 without E2H enabled. */
+                return false;
+            }
+            contextidr = env->cp15.contextidr_el[2];
+            break;
+        case 1:
+            contextidr = env->cp15.contextidr_el[1];
+            break;
+        case 0:
+            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+                contextidr = env->cp15.contextidr_el[2];
+            } else {
+                contextidr = env->cp15.contextidr_el[1];
+            }
+            break;
         }
-        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
-    case 5: /* linked address mismatch (reserved in AArch64) */
+        break;
+
+    case 7:  /* linked contextidr_el1 match */
+        contextidr = env->cp15.contextidr_el[1];
+        break;
+    case 13: /* linked contextidr_el2 match */
+        contextidr = env->cp15.contextidr_el[2];
+        break;
+
     case 9: /* linked VMID match (reserved if no EL2) */
     case 11: /* linked context ID and VMID match (reserved if no EL2) */
+    case 15: /* linked full context ID match */
     default:
         /*
          * Links to Unlinked context breakpoints must generate no
@@ -66,7 +87,12 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
         return false;
     }
 
-    return false;
+    /*
+     * We match the whole register even if this is AArch32 using the
+     * short descriptor format (in which case it holds both PROCID and ASID),
+     * since we don't implement the optional v7 context ID masking.
+     */
+    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
 }
 
 static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)


@@ -70,7 +70,7 @@ static void daif_check(CPUARMState *env, uint32_t op,
                        uint32_t imm, uintptr_t ra)
 {
     /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set.  */
-    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
+    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
         raise_exception_ra(env, EXCP_UDEF,
                            syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
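This one-line change rides on the arm_sctlr() rework above: the inline helper in cpu.h was replaced by an out-of-line function (its body lands in the suppressed diff below). Under VHE with HCR_EL2.{E2H,TGE} set, EL0 runs in the EL2&0 regime, so SCTLR_EL1 is no longer always the right register for the EL0 UMA check; arm_sctlr(env, 0) now selects the SCTLR of the current regime instead of hard-coding sctlr_el[1].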

[One file's diff is omitted here: it was too large to display.]


@@ -769,6 +769,39 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
 
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
+{
+    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
+}
+
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
+{
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        return mmu_idx | ARM_MMU_IDX_M;
+    } else {
+        return mmu_idx | ARM_MMU_IDX_A;
+    }
+}
+
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
+
+/*
+ * Return the MMU index for a v7M CPU with all relevant information
+ * manually specified.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
+                              bool secstate, bool priv, bool negpri);
+
+/*
+ * Return the MMU index for a v7M CPU in the specified security and
+ * privilege state.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+                                                bool secstate, bool priv);
+
+/* Return the MMU index for a v7M CPU in the specified security state */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
+
 /* Return true if the stage 1 translation regime is using LPAE format page
  * tables */
 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
@@ -804,24 +837,44 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
     }
 }
 
+/* Return true if this address translation regime has two ranges.  */
+static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
+{
+    switch (mmu_idx) {
+    case ARMMMUIdx_Stage1_E0:
+    case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E10_1:
+    case ARMMMUIdx_E20_0:
+    case ARMMMUIdx_E20_2:
+    case ARMMMUIdx_SE10_0:
+    case ARMMMUIdx_SE10_1:
+        return true;
+    default:
+        return false;
+    }
+}
+
 /* Return true if this address translation regime is secure */
 static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
-    case ARMMMUIdx_S12NSE0:
-    case ARMMMUIdx_S12NSE1:
-    case ARMMMUIdx_S1NSE0:
-    case ARMMMUIdx_S1NSE1:
-    case ARMMMUIdx_S1E2:
-    case ARMMMUIdx_S2NS:
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E10_1:
+    case ARMMMUIdx_E20_0:
+    case ARMMMUIdx_E20_2:
+    case ARMMMUIdx_Stage1_E0:
+    case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_E2:
+    case ARMMMUIdx_Stage2:
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
     case ARMMMUIdx_MUser:
         return false;
-    case ARMMMUIdx_S1E3:
-    case ARMMMUIdx_S1SE0:
-    case ARMMMUIdx_S1SE1:
+    case ARMMMUIdx_SE3:
+    case ARMMMUIdx_SE10_0:
+    case ARMMMUIdx_SE10_1:
     case ARMMMUIdx_MSPrivNegPri:
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPriv:
@@ -975,7 +1028,7 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env);
 #ifdef CONFIG_USER_ONLY
 static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
 {
-    return ARMMMUIdx_S1NSE0;
+    return ARMMMUIdx_Stage1_E0;
 }
 #else
 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);


@@ -137,17 +137,20 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
     }
 
     if (kvm_enabled()) {
-        const char *cpu_type = current_machine->cpu_type;
-        int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);
         bool supported = false;
 
         if (!strcmp(model->name, "host") || !strcmp(model->name, "max")) {
             /* These are kvmarm's recommended cpu types */
             supported = true;
-        } else if (strlen(model->name) == len &&
-                   !strncmp(model->name, cpu_type, len)) {
-            /* KVM is enabled and we're using this type, so it works. */
-            supported = true;
+        } else if (current_machine->cpu_type) {
+            const char *cpu_type = current_machine->cpu_type;
+            int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);
+
+            if (strlen(model->name) == len &&
+                !strncmp(model->name, cpu_type, len)) {
+                /* KVM is enabled and we're using this type, so it works. */
+                supported = true;
+            }
         }
 
         if (!supported) {
             error_setg(errp, "We cannot guarantee the CPU type '%s' works "

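This is the query-cpu-model-expansion crash fix from the queue summary: with '-machine none' no default CPU type is set, so current_machine->cpu_type is NULL and the old code unconditionally called strlen() on it. The comparison against the machine's CPU type is now attempted only when a type actually exists.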

@@ -371,7 +371,10 @@ static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra)
     if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
         uint64_t hcr = arm_hcr_el2_eff(env);
         bool trap = !(hcr & HCR_API);
-        /* FIXME: ARMv8.1-VHE: trap only applies to EL1&0 regime.  */
+        if (el == 0) {
+            /* Trap only applies to EL1&0 regime.  */
+            trap &= (hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE);
+        }
         /* FIXME: ARMv8.3-NV: HCR_NV trap takes precedence for ERETA[AB].  */
         if (trap) {
             pauth_trap(env, 2, ra);
@@ -386,14 +389,7 @@ static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra)
 
 static bool pauth_key_enabled(CPUARMState *env, int el, uint32_t bit)
 {
-    uint32_t sctlr;
-    if (el == 0) {
-        /* FIXME: ARMv8.1-VHE S2 translation regime.  */
-        sctlr = env->cp15.sctlr_el[1];
-    } else {
-        sctlr = env->cp15.sctlr_el[el];
-    }
-    return (sctlr & bit) != 0;
+    return (arm_sctlr(env, el) & bit) != 0;
 }
 
 uint64_t HELPER(pacia)(CPUARMState *env, uint64_t x, uint64_t y)


@@ -105,25 +105,36 @@ void a64_translate_init(void)
         offsetof(CPUARMState, exclusive_high), "exclusive_high");
 }
 
-static inline int get_a64_user_mem_index(DisasContext *s)
+/*
+ * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
+ */
+static int get_a64_user_mem_index(DisasContext *s)
 {
-    /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
-     * if EL1, access as if EL0; otherwise access at current EL
+    /*
+     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
+     * which is the usual mmu_idx for this cpu state.
      */
-    ARMMMUIdx useridx;
+    ARMMMUIdx useridx = s->mmu_idx;
 
-    switch (s->mmu_idx) {
-    case ARMMMUIdx_S12NSE1:
-        useridx = ARMMMUIdx_S12NSE0;
-        break;
-    case ARMMMUIdx_S1SE1:
-        useridx = ARMMMUIdx_S1SE0;
-        break;
-    case ARMMMUIdx_S2NS:
-        g_assert_not_reached();
-    default:
-        useridx = s->mmu_idx;
-        break;
+    if (s->unpriv) {
+        /*
+         * We have pre-computed the condition for AccType_UNPRIV.
+         * Therefore we should never get here with a mmu_idx for
+         * which we do not know the corresponding user mmu_idx.
+         */
+        switch (useridx) {
+        case ARMMMUIdx_E10_1:
+            useridx = ARMMMUIdx_E10_0;
+            break;
+        case ARMMMUIdx_E20_2:
+            useridx = ARMMMUIdx_E20_0;
+            break;
+        case ARMMMUIdx_SE10_1:
+            useridx = ARMMMUIdx_SE10_0;
+            break;
+        default:
+            g_assert_not_reached();
+        }
     }
     return arm_to_core_mmu_idx(useridx);
 }
@@ -175,8 +186,7 @@ static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
     if (tbi == 0) {
         /* Load unmodified address */
         tcg_gen_mov_i64(dst, src);
-    } else if (s->current_el >= 2) {
-        /* FIXME: ARMv8.1-VHE S2 translation regime.  */
+    } else if (!regime_has_2_ranges(s->mmu_idx)) {
         /* Force tag byte to all zero */
         tcg_gen_extract_i64(dst, src, 0, 56);
     } else {
@@ -14172,6 +14182,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
     dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
     dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
+    dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = arm_cpu->cp_regs;

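Note the shape of this change: the per-instruction EL1-as-EL0 decision is gone from the translator's fast path. Whether LDTR/STTR and friends should use AccType_UNPRIV is precomputed into the TBFLAG_A64.UNPRIV hflag (see the cpu.h and translate.h hunks), so get_a64_user_mem_index() only needs the fixed regime-to-user mapping, and any mmu_idx without a known user counterpart is a bug caught by the assertion.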

@@ -152,14 +152,14 @@ static inline int get_a32_user_mem_index(DisasContext *s)
      * otherwise, access as if at PL0.
      */
     switch (s->mmu_idx) {
-    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
-    case ARMMMUIdx_S12NSE0:
-    case ARMMMUIdx_S12NSE1:
-        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
-    case ARMMMUIdx_S1E3:
-    case ARMMMUIdx_S1SE0:
-    case ARMMMUIdx_S1SE1:
-        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
+    case ARMMMUIdx_E2:          /* this one is UNPREDICTABLE */
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E10_1:
+        return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
+    case ARMMMUIdx_SE3:
+    case ARMMMUIdx_SE10_0:
+    case ARMMMUIdx_SE10_1:
+        return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
     case ARMMMUIdx_MUser:
     case ARMMMUIdx_MPriv:
         return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
@@ -172,7 +172,6 @@ static inline int get_a32_user_mem_index(DisasContext *s)
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPrivNegPri:
         return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
-    case ARMMMUIdx_S2NS:
     default:
         g_assert_not_reached();
     }
@@ -10848,38 +10847,48 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
      */
     dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                                !arm_el_is_aa64(env, 3);
-    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
-    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
-    dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
+    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
     dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
-    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
+    condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
     dc->condexec_mask = (condexec & 0xf) << 1;
     dc->condexec_cond = condexec >> 4;
+
     core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
     dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
     dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
 #if !defined(CONFIG_USER_ONLY)
     dc->user = (dc->current_el == 0);
 #endif
-    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
     dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
-    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
-    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
-    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
-        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
-        dc->vec_stride = 0;
+
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        dc->vfp_enabled = 1;
+        dc->be_data = MO_TE;
+        dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
+        dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+            regime_is_secure(env, dc->mmu_idx);
+        dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
+        dc->v8m_fpccr_s_wrong =
+            FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
+        dc->v7m_new_fp_ctxt_needed =
+            FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
+        dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
     } else {
-        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
-        dc->c15_cpar = 0;
+        dc->be_data =
+            FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
+        dc->debug_target_el =
+            FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+        dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
+        dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
+        dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
+        dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
+        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+            dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
+        } else {
+            dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
+            dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
+        }
     }
-    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
-    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
-        regime_is_secure(env, dc->mmu_idx);
-    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
-    dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
-    dc->v7m_new_fp_ctxt_needed =
-        FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
-    dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
+
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
@@ -10901,9 +10910,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
     dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
     dc->is_ldex = false;
-    if (!arm_feature(env, ARM_FEATURE_M)) {
-        dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
-    }
 
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
@@ -11340,10 +11346,10 @@ static const TranslatorOps thumb_translator_ops = {
 /* generate intermediate code for basic block 'tb'.  */
 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
 {
-    DisasContext dc;
+    DisasContext dc = { };
     const TranslatorOps *ops = &arm_translator_ops;
 
-    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
+    if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
         ops = &thumb_translator_ops;
     }
 
 #ifdef TARGET_AARCH64


@@ -73,6 +73,8 @@ typedef struct DisasContext {
      *  ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
      */
     bool is_ldex;
+    /* True if AccType_UNPRIV should be used for LDTR et al */
+    bool unpriv;
     /* True if v8.3-PAuth is active.  */
     bool pauth_active;
     /* True with v8.5-BTI and SCTLR_ELx.BT* set.  */
@@ -126,7 +128,7 @@ static inline int default_exception_el(DisasContext *s)
      * exceptions can only be routed to ELs above 1, so we target the higher of
      * 1 or the current EL.
      */
-    return (s->mmu_idx == ARMMMUIdx_S1SE0 && s->secure_routed_to_el3)
+    return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3)
             ? 3 : MAX(1, s->current_el);
 }
} }