commit 754f756cc4

Merge tag 'pull-target-arm-20220422-1' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * Implement GICv4 emulation
 * Some cleanup patches in target/arm
 * hw/arm/smmuv3: Pass the actual perm to returned IOMMUTLBEntry in smmuv3_translate()

# gpg: Signature made Fri 22 Apr 2022 06:46:19 AM PDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]

* tag 'pull-target-arm-20220422-1' of https://git.linaro.org/people/pmaydell/qemu-arm: (61 commits)
  hw/arm/smmuv3: Pass the actual perm to returned IOMMUTLBEntry in smmuv3_translate()
  target/arm: Use tcg_constant_i32 in translate.h
  target/arm: Use tcg_constant in translate-vfp.c
  target/arm: Use smin/smax for do_sat_addsub_32
  target/arm: Use tcg_constant in translate-neon.c
  target/arm: Use tcg_constant in translate-m-nocp.c
  target/arm: Simplify aa32 DISAS_WFI
  target/arm: Simplify gen_sar
  target/arm: Simplify GEN_SHIFT in translate.c
  target/arm: Split out gen_rebuild_hflags
  target/arm: Split out set_btype_raw
  target/arm: Remove fpexc32_access
  target/arm: Change CPUArchState.thumb to bool
  target/arm: Change DisasContext.thumb to bool
  target/arm: Extend store_cpu_offset to take field size
  target/arm: Change CPUArchState.aarch64 to bool
  target/arm: Change DisasContext.aarch64 to bool
  target/arm: Update SCTLR bits to ARMv9.2
  target/arm: Update SCR_EL3 bits to ARMv8.8
  target/arm: Update ISAR fields for ARMv8.8
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
@@ -99,11 +99,14 @@ gic-version
     GICv2. Note that this limits the number of CPUs to 8.
   ``3``
     GICv3. This allows up to 512 CPUs.
+  ``4``
+    GICv4. Requires ``virtualization`` to be ``on``; allows up to 317 CPUs.
   ``host``
     Use the same GIC version the host provides, when using KVM
   ``max``
     Use the best GIC version possible (same as host when using KVM;
-    currently same as ``3``` for TCG, but this may change in future)
+    with TCG this is currently ``3`` if ``virtualization`` is ``off`` and
+    ``4`` if ``virtualization`` is ``on``, but this may change in future)
 
 its
   Set ``on``/``off`` to enable/disable ITS instantiation. The default is ``on``
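
In practice, per the option documentation above, selecting GICv4 on the virt board requires EL2 emulation to be enabled as well; an illustrative invocation (machine and CPU options here are only an example) would be:

    qemu-system-aarch64 -M virt,virtualization=on,gic-version=4 -cpu max -smp 4 ...
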
@@ -760,7 +760,7 @@ epilogue:
     qemu_mutex_unlock(&s->mutex);
     switch (status) {
     case SMMU_TRANS_SUCCESS:
-        entry.perm = flag;
+        entry.perm = cached_entry->entry.perm;
         entry.translated_addr = cached_entry->entry.translated_addr +
                                 (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;

hw/arm/virt.c (102 lines changed)
@@ -522,7 +522,7 @@ static void fdt_add_gic_node(VirtMachineState *vms)
     qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x2);
     qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2);
     qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0);
-    if (vms->gic_version == VIRT_GIC_VERSION_3) {
+    if (vms->gic_version != VIRT_GIC_VERSION_2) {
         int nb_redist_regions = virt_gicv3_redist_region_count(vms);
 
         qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
@@ -690,14 +690,32 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
     /* We create a standalone GIC */
     SysBusDevice *gicbusdev;
     const char *gictype;
-    int type = vms->gic_version, i;
+    int i;
     unsigned int smp_cpus = ms->smp.cpus;
     uint32_t nb_redist_regions = 0;
+    int revision;
 
-    gictype = (type == 3) ? gicv3_class_name() : gic_class_name();
+    if (vms->gic_version == VIRT_GIC_VERSION_2) {
+        gictype = gic_class_name();
+    } else {
+        gictype = gicv3_class_name();
+    }
 
+    switch (vms->gic_version) {
+    case VIRT_GIC_VERSION_2:
+        revision = 2;
+        break;
+    case VIRT_GIC_VERSION_3:
+        revision = 3;
+        break;
+    case VIRT_GIC_VERSION_4:
+        revision = 4;
+        break;
+    default:
+        g_assert_not_reached();
+    }
     vms->gic = qdev_new(gictype);
-    qdev_prop_set_uint32(vms->gic, "revision", type);
+    qdev_prop_set_uint32(vms->gic, "revision", revision);
     qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus);
     /* Note that the num-irq property counts both internal and external
      * interrupts; there are always 32 of the former (mandated by GIC spec).
@@ -707,9 +725,8 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
         qdev_prop_set_bit(vms->gic, "has-security-extensions", vms->secure);
     }
 
-    if (type == 3) {
-        uint32_t redist0_capacity =
-            vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
+    if (vms->gic_version != VIRT_GIC_VERSION_2) {
+        uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST);
         uint32_t redist0_count = MIN(smp_cpus, redist0_capacity);
 
         nb_redist_regions = virt_gicv3_redist_region_count(vms);
@@ -728,7 +745,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
 
         if (nb_redist_regions == 2) {
             uint32_t redist1_capacity =
-                vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE;
+                virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2);
 
             qdev_prop_set_uint32(vms->gic, "redist-region-count[1]",
                                  MIN(smp_cpus - redist0_count, redist1_capacity));
@@ -742,7 +759,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
     gicbusdev = SYS_BUS_DEVICE(vms->gic);
     sysbus_realize_and_unref(gicbusdev, &error_fatal);
     sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base);
-    if (type == 3) {
+    if (vms->gic_version != VIRT_GIC_VERSION_2) {
         sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base);
         if (nb_redist_regions == 2) {
             sysbus_mmio_map(gicbusdev, 2,
@@ -780,7 +797,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
                                                    ppibase + timer_irq[irq]));
         }
 
-        if (type == 3) {
+        if (vms->gic_version != VIRT_GIC_VERSION_2) {
             qemu_irq irq = qdev_get_gpio_in(vms->gic,
                                             ppibase + ARCH_GIC_MAINT_IRQ);
             qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
@@ -806,9 +823,9 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
 
     fdt_add_gic_node(vms);
 
-    if (type == 3 && vms->its) {
+    if (vms->gic_version != VIRT_GIC_VERSION_2 && vms->its) {
         create_its(vms);
-    } else if (type == 2) {
+    } else if (vms->gic_version == VIRT_GIC_VERSION_2) {
         create_v2m(vms);
     }
 }
@@ -1658,10 +1675,10 @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
          * purposes are to make TCG consistent (with 64-bit KVM hosts)
          * and to improve SGI efficiency.
          */
-        if (vms->gic_version == VIRT_GIC_VERSION_3) {
-            clustersz = GICV3_TARGETLIST_BITS;
-        } else {
+        if (vms->gic_version == VIRT_GIC_VERSION_2) {
             clustersz = GIC_TARGETLIST_BITS;
+        } else {
+            clustersz = GICV3_TARGETLIST_BITS;
         }
     }
     return arm_cpu_mp_affinity(idx, clustersz);
@@ -1794,6 +1811,10 @@ static void finalize_gic_version(VirtMachineState *vms)
             error_report(
                 "gic-version=3 is not supported with kernel-irqchip=off");
             exit(1);
+        case VIRT_GIC_VERSION_4:
+            error_report(
+                "gic-version=4 is not supported with kernel-irqchip=off");
+            exit(1);
         }
     }
 
@@ -1831,6 +1852,9 @@ static void finalize_gic_version(VirtMachineState *vms)
         case VIRT_GIC_VERSION_2:
         case VIRT_GIC_VERSION_3:
             break;
+        case VIRT_GIC_VERSION_4:
+            error_report("gic-version=4 is not supported with KVM");
+            exit(1);
         }
 
         /* Check chosen version is effectively supported by the host */
@@ -1854,7 +1878,12 @@ static void finalize_gic_version(VirtMachineState *vms)
     case VIRT_GIC_VERSION_MAX:
         if (module_object_class_by_name("arm-gicv3")) {
             /* CONFIG_ARM_GICV3_TCG was set */
-            vms->gic_version = VIRT_GIC_VERSION_3;
+            if (vms->virt) {
+                /* GICv4 only makes sense if CPU has EL2 */
+                vms->gic_version = VIRT_GIC_VERSION_4;
+            } else {
+                vms->gic_version = VIRT_GIC_VERSION_3;
+            }
         } else {
             vms->gic_version = VIRT_GIC_VERSION_2;
         }
@@ -1862,6 +1891,12 @@ static void finalize_gic_version(VirtMachineState *vms)
     case VIRT_GIC_VERSION_HOST:
         error_report("gic-version=host requires KVM");
         exit(1);
+    case VIRT_GIC_VERSION_4:
+        if (!vms->virt) {
+            error_report("gic-version=4 requires virtualization enabled");
+            exit(1);
+        }
+        break;
     case VIRT_GIC_VERSION_2:
     case VIRT_GIC_VERSION_3:
         break;
@@ -2029,16 +2064,16 @@ static void machvirt_init(MachineState *machine)
         vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC;
     }
 
-    /* The maximum number of CPUs depends on the GIC version, or on how
-     * many redistributors we can fit into the memory map.
+    /*
+     * The maximum number of CPUs depends on the GIC version, or on how
+     * many redistributors we can fit into the memory map (which in turn
+     * depends on whether this is a GICv3 or v4).
      */
-    if (vms->gic_version == VIRT_GIC_VERSION_3) {
-        virt_max_cpus =
-            vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
-        virt_max_cpus +=
-            vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE;
-    } else {
+    if (vms->gic_version == VIRT_GIC_VERSION_2) {
         virt_max_cpus = GIC_NCPU;
+    } else {
+        virt_max_cpus = virt_redist_capacity(vms, VIRT_GIC_REDIST) +
+            virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2);
     }
 
     if (max_cpus > virt_max_cpus) {
@@ -2426,8 +2461,19 @@ static void virt_set_mte(Object *obj, bool value, Error **errp)
 static char *virt_get_gic_version(Object *obj, Error **errp)
 {
     VirtMachineState *vms = VIRT_MACHINE(obj);
-    const char *val = vms->gic_version == VIRT_GIC_VERSION_3 ? "3" : "2";
+    const char *val;
 
+    switch (vms->gic_version) {
+    case VIRT_GIC_VERSION_4:
+        val = "4";
+        break;
+    case VIRT_GIC_VERSION_3:
+        val = "3";
+        break;
+    default:
+        val = "2";
+        break;
+    }
     return g_strdup(val);
 }
 
@@ -2435,7 +2481,9 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
 {
     VirtMachineState *vms = VIRT_MACHINE(obj);
 
-    if (!strcmp(value, "3")) {
+    if (!strcmp(value, "4")) {
+        vms->gic_version = VIRT_GIC_VERSION_4;
+    } else if (!strcmp(value, "3")) {
         vms->gic_version = VIRT_GIC_VERSION_3;
     } else if (!strcmp(value, "2")) {
         vms->gic_version = VIRT_GIC_VERSION_2;
@@ -2893,7 +2941,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
                                   virt_set_gic_version);
     object_class_property_set_description(oc, "gic-version",
                                           "Set GIC version. "
-                                          "Valid values are 2, 3, host and max");
+                                          "Valid values are 2, 3, 4, host and max");
 
     object_class_property_add_str(oc, "iommu", virt_get_iommu, virt_set_iommu);
     object_class_property_set_description(oc, "iommu",
@@ -144,6 +144,25 @@ const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
     }
 };
 
+static bool gicv4_needed(void *opaque)
+{
+    GICv3CPUState *cs = opaque;
+
+    return cs->gic->revision > 3;
+}
+
+const VMStateDescription vmstate_gicv3_gicv4 = {
+    .name = "arm_gicv3_cpu/gicv4",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = gicv4_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState),
+        VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_gicv3_cpu = {
     .name = "arm_gicv3_cpu",
     .version_id = 1,
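
The .needed hook is the standard QEMU migration idiom for optional state: when it returns false the subsection is simply not put on the wire, so migration streams from GICv3 configurations are unchanged and remain compatible with older QEMU; only a GICv4 instance carries the two extra registers.
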
@@ -175,6 +194,7 @@ static const VMStateDescription vmstate_gicv3_cpu = {
     .subsections = (const VMStateDescription * []) {
         &vmstate_gicv3_cpu_virt,
         &vmstate_gicv3_cpu_sre_el1,
+        &vmstate_gicv3_gicv4,
         NULL
     }
 };
@@ -295,7 +315,7 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
 
         memory_region_init_io(&region->iomem, OBJECT(s),
                               ops ? &ops[1] : NULL, region, name,
-                              s->redist_region_count[i] * GICV3_REDIST_SIZE);
+                              s->redist_region_count[i] * gicv3_redist_size(s));
         sysbus_init_mmio(sbd, &region->iomem);
         g_free(name);
     }
@@ -306,12 +326,14 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
     GICv3State *s = ARM_GICV3_COMMON(dev);
     int i, rdist_capacity, cpuidx;
 
-    /* revision property is actually reserved and currently used only in order
-     * to keep the interface compatible with GICv2 code, avoiding extra
-     * conditions. However, in future it could be used, for example, if we
-     * implement GICv4.
+    /*
+     * This GIC device supports only revisions 3 and 4. The GICv1/v2
+     * is a separate device.
+     * Note that subclasses of this device may impose further restrictions
+     * on the GIC revision: notably, the in-kernel KVM GIC doesn't
+     * support GICv4.
      */
-    if (s->revision != 3) {
+    if (s->revision != 3 && s->revision != 4) {
         error_setg(errp, "unsupported GIC revision %d", s->revision);
         return;
     }
@@ -328,6 +350,10 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
                    s->num_irq, GIC_INTERNAL);
         return;
     }
+    if (s->num_cpu == 0) {
+        error_setg(errp, "num-cpu must be at least 1");
+        return;
+    }
 
     /* ITLinesNumber is represented as (N / 32) - 1, so this is an
      * implementation imposed restriction, not an architectural one,
@@ -350,9 +376,9 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
     for (i = 0; i < s->nb_redist_regions; i++) {
         rdist_capacity += s->redist_region_count[i];
     }
-    if (rdist_capacity < s->num_cpu) {
+    if (rdist_capacity != s->num_cpu) {
         error_setg(errp, "Capacity of the redist regions(%d) "
-                   "is less than number of vcpus(%d)",
+                   "does not match the number of vcpus(%d)",
                    rdist_capacity, s->num_cpu);
         return;
     }
@@ -382,8 +408,8 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
          * Last == 1 if this is the last redistributor in a series of
          * contiguous redistributor pages
          * DirectLPI == 0 (direct injection of LPIs not supported)
-         * VLPIS == 0 (virtual LPIs not supported)
-         * PLPIS == 0 (physical LPIs not supported)
+         * VLPIS == 1 if vLPIs supported (GICv4 and up)
+         * PLPIS == 1 if LPIs supported
          */
         cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);
 
@@ -398,6 +424,9 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
 
         if (s->lpi_enable) {
             s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
+            if (s->revision > 3) {
+                s->cpu[i].gicr_typer |= GICR_TYPER_VLPIS;
+            }
         }
     }
 
@@ -410,6 +439,8 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
         cpuidx += s->redist_region_count[i];
         s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST;
     }
+
+    s->itslist = g_ptr_array_new();
 }
 
 static void arm_gicv3_finalize(Object *obj)
@@ -438,6 +469,8 @@ static void arm_gicv3_common_reset(DeviceState *dev)
         cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
         cs->gicr_propbaser = 0;
         cs->gicr_pendbaser = 0;
+        cs->gicr_vpropbaser = 0;
+        cs->gicr_vpendbaser = 0;
         /* If we're resetting a TZ-aware GIC as if secure firmware
          * had set it up ready to start a kernel in non-secure, we
          * need to set interrupts to group 1 so the kernel can use them.
@@ -459,6 +492,7 @@ static void arm_gicv3_common_reset(DeviceState *dev)
 
         cs->hppi.prio = 0xff;
         cs->hpplpi.prio = 0xff;
+        cs->hppvlpi.prio = 0xff;
 
         /* State in the CPU interface must *not* be reset here, because it
          * is part of the CPU's reset domain, not the GIC device's.
@@ -21,6 +21,12 @@
 #include "hw/irq.h"
 #include "cpu.h"
 
+/*
+ * Special case return value from hppvi_index(); must be larger than
+ * the architecturally maximum possible list register index (which is 15)
+ */
+#define HPPVI_INDEX_VLPI 16
+
 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
 {
     return env->gicv3state;
@@ -157,10 +163,18 @@ static int ich_highest_active_virt_prio(GICv3CPUState *cs)
 
 static int hppvi_index(GICv3CPUState *cs)
 {
-    /* Return the list register index of the highest priority pending
+    /*
+     * Return the list register index of the highest priority pending
      * virtual interrupt, as per the HighestPriorityVirtualInterrupt
      * pseudocode. If no pending virtual interrupts, return -1.
+     * If the highest priority pending virtual interrupt is a vLPI,
+     * return HPPVI_INDEX_VLPI.
+     * (The pseudocode handles checking whether the vLPI is higher
+     * priority than the highest priority list register at every
+     * callsite of HighestPriorityVirtualInterrupt; we check it here.)
      */
+    ARMCPU *cpu = ARM_CPU(cs->cpu);
+    CPUARMState *env = &cpu->env;
     int idx = -1;
     int i;
     /* Note that a list register entry with a priority of 0xff will
@@ -202,6 +216,23 @@ static int hppvi_index(GICv3CPUState *cs)
         }
     }
 
+    /*
+     * "no pending vLPI" is indicated with prio = 0xff, which always
+     * fails the priority check here. vLPIs are only considered
+     * when we are in Non-Secure state.
+     */
+    if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
+        if (cs->hppvlpi.grp == GICV3_G0) {
+            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
+                return HPPVI_INDEX_VLPI;
+            }
+        } else {
+            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
+                return HPPVI_INDEX_VLPI;
+            }
+        }
+    }
+
     return idx;
 }
 
@@ -289,6 +320,47 @@ static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
     return false;
 }
 
+static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
+{
+    /*
+     * Return true if we can signal the highest priority pending vLPI.
+     * We can assume we're Non-secure because hppvi_index() already
+     * tested for that.
+     */
+    uint32_t mask, rprio, vpmr;
+
+    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
+        /* Virtual interface disabled */
+        return false;
+    }
+
+    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+                     ICH_VMCR_EL2_VPMR_LENGTH);
+
+    if (cs->hppvlpi.prio >= vpmr) {
+        /* Priority mask masks this interrupt */
+        return false;
+    }
+
+    rprio = ich_highest_active_virt_prio(cs);
+    if (rprio == 0xff) {
+        /* No running interrupt so we can preempt */
+        return true;
+    }
+
+    mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
+
+    /*
+     * We only preempt a running interrupt if the pending interrupt's
+     * group priority is sufficient (the subpriorities are not considered).
+     */
+    if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
+        return true;
+    }
+
+    return false;
+}
+
 static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                 uint32_t *misr)
 {
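
To make the final check concrete: only the group-priority bits take part in preemption, the subpriority bits are masked away. A worked example with hypothetical values, assuming a mask that keeps the top five priority bits:

    /* Hypothetical values, for illustration only */
    uint32_t mask = 0xf8;                 /* group priority in bits [7:3] */
    uint8_t pending = 0x2c, running = 0x30;
    bool preempts = (pending & mask) < (running & mask); /* 0x28 < 0x30: true */
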
@@ -370,9 +442,55 @@ static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
     return value;
 }
 
+void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
+{
+    /*
+     * Tell the CPU about any pending virtual interrupts.
+     * This should only be called for changes that affect the
+     * vIRQ and vFIQ status and do not change the maintenance
+     * interrupt status. This means that unlike gicv3_cpuif_virt_update()
+     * this function won't recursively call back into the GIC code.
+     * The main use of this is when the redistributor has changed the
+     * highest priority pending virtual LPI.
+     */
+    int idx;
+    int irqlevel = 0;
+    int fiqlevel = 0;
+
+    idx = hppvi_index(cs);
+    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
+                                  cs->hppvlpi.irq, cs->hppvlpi.grp,
+                                  cs->hppvlpi.prio);
+    if (idx == HPPVI_INDEX_VLPI) {
+        if (icv_hppvlpi_can_preempt(cs)) {
+            if (cs->hppvlpi.grp == GICV3_G0) {
+                fiqlevel = 1;
+            } else {
+                irqlevel = 1;
+            }
+        }
+    } else if (idx >= 0) {
+        uint64_t lr = cs->ich_lr_el2[idx];
+
+        if (icv_hppi_can_preempt(cs, lr)) {
+            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
+            if (lr & ICH_LR_EL2_GROUP) {
+                irqlevel = 1;
+            } else {
+                fiqlevel = 1;
+            }
+        }
+    }
+
+    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
+    qemu_set_irq(cs->parent_vfiq, fiqlevel);
+    qemu_set_irq(cs->parent_virq, irqlevel);
+}
+
 static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
 {
-    /* Tell the CPU about any pending virtual interrupts or
+    /*
+     * Tell the CPU about any pending virtual interrupts or
      * maintenance interrupts, following a change to the state
      * of the CPU interface relevant to virtual interrupts.
      *
@@ -389,37 +507,17 @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
      * naturally as a result of there being no architectural
      * linkage between the physical and virtual GIC logic.
      */
-    int idx;
-    int irqlevel = 0;
-    int fiqlevel = 0;
-    int maintlevel = 0;
     ARMCPU *cpu = ARM_CPU(cs->cpu);
+    int maintlevel = 0;
 
-    idx = hppvi_index(cs);
-    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
-    if (idx >= 0) {
-        uint64_t lr = cs->ich_lr_el2[idx];
-
-        if (icv_hppi_can_preempt(cs, lr)) {
-            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
-            if (lr & ICH_LR_EL2_GROUP) {
-                irqlevel = 1;
-            } else {
-                fiqlevel = 1;
-            }
-        }
-    }
+    gicv3_cpuif_virt_irq_fiq_update(cs);
 
     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
         maintenance_interrupt_state(cs) != 0) {
         maintlevel = 1;
     }
 
-    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
-                                    irqlevel, maintlevel);
-
-    qemu_set_irq(cs->parent_vfiq, fiqlevel);
-    qemu_set_irq(cs->parent_virq, irqlevel);
+    trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
     qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
 }
 
@@ -445,7 +543,7 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
         cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
 
-        gicv3_cpuif_virt_update(cs);
+        gicv3_cpuif_virt_irq_fiq_update(cs);
         return;
     }
 
@@ -490,7 +588,7 @@ static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     write_vbpr(cs, grp, value);
 
-    gicv3_cpuif_virt_update(cs);
+    gicv3_cpuif_virt_irq_fiq_update(cs);
 }
 
 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -517,7 +615,7 @@ static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                                  ICH_VMCR_EL2_VPMR_LENGTH, value);
 
-    gicv3_cpuif_virt_update(cs);
+    gicv3_cpuif_virt_irq_fiq_update(cs);
 }
 
 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -584,7 +682,7 @@ static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
                                  1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
 
-    gicv3_cpuif_virt_update(cs);
+    gicv3_cpuif_virt_irq_fiq_update(cs);
 }
 
 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -603,7 +701,11 @@ static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
     int idx = hppvi_index(cs);
     uint64_t value = INTID_SPURIOUS;
 
-    if (idx >= 0) {
+    if (idx == HPPVI_INDEX_VLPI) {
+        if (cs->hppvlpi.grp == grp) {
+            value = cs->hppvlpi.irq;
+        }
+    } else if (idx >= 0) {
         uint64_t lr = cs->ich_lr_el2[idx];
         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
 
@@ -634,6 +736,18 @@ static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
     cs->ich_apr[grp][regno] |= (1 << regbit);
 }
 
+static void icv_activate_vlpi(GICv3CPUState *cs)
+{
+    uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
+    int prio = cs->hppvlpi.prio & mask;
+    int aprbit = prio >> (8 - cs->vprebits);
+    int regno = aprbit / 32;
+    int regbit = aprbit % 32;
+
+    cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit);
+    gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
+}
+
 static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     GICv3CPUState *cs = icc_cs_from_env(env);
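
The active-priority bookkeeping above maps the vLPI's group priority onto one bit of the ICH_APR<n> registers. A worked example with assumed values:

    /* Assume cs->vprebits = 5 and a masked group priority of 0x40 */
    int prio = 0x40;
    int aprbit = prio >> (8 - 5); /* 0x40 >> 3 == 8 */
    int regno = aprbit / 32;      /* 0: the bit lands in APR register 0 */
    int regbit = aprbit % 32;     /* bit 8 of that register */
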
@@ -641,7 +755,12 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
     int idx = hppvi_index(cs);
     uint64_t intid = INTID_SPURIOUS;
 
-    if (idx >= 0) {
+    if (idx == HPPVI_INDEX_VLPI) {
+        if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
+            intid = cs->hppvlpi.irq;
+            icv_activate_vlpi(cs);
+        }
+    } else if (idx >= 0) {
         uint64_t lr = cs->ich_lr_el2[idx];
         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
 
@@ -2333,7 +2452,7 @@ static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
 
     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
-    gicv3_cpuif_virt_update(cs);
+    gicv3_cpuif_virt_irq_fiq_update(cs);
 }
 
 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -2459,11 +2578,15 @@ static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
     uint64_t value;
 
     value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
-        | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
+        | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V
         | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
         | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
         | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
 
+    if (cs->gic->revision < 4) {
+        value |= ICH_VTR_EL2_NV4;
+    }
+
     trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
     return value;
 }
@@ -2616,6 +2739,12 @@ static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
     GICv3CPUState *cs = opaque;
 
     gicv3_cpuif_update(cs);
+    /*
+     * Because vLPIs are only pending in NonSecure state,
+     * an EL change can change the VIRQ/VFIQ status (but
+     * cannot affect the maintenance interrupt state)
+     */
+    gicv3_cpuif_virt_irq_fiq_update(cs);
 }
 
 void gicv3_init_cpuif(GICv3State *s)
@@ -383,7 +383,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
          * No1N == 1 (1-of-N SPI interrupts not supported)
          * A3V == 1 (non-zero values of Affinity level 3 supported)
          * IDbits == 0xf (we support 16-bit interrupt identifiers)
-         * DVIS == 0 (Direct virtual LPI injection not supported)
+         * DVIS == 1 (Direct virtual LPI injection supported) if GICv4
         * LPIS == 1 (LPIs are supported if affinity routing is enabled)
         * num_LPIs == 0b00000 (bits [15:11],Number of LPIs as indicated
         * by GICD_TYPER.IDbits)
@@ -399,8 +399,9 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
          * so we only need to check the DS bit.
          */
         bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS);
+        bool dvis = s->revision >= 4;
 
-        *data = (1 << 25) | (1 << 24) | (sec_extn << 10) |
+        *data = (1 << 25) | (1 << 24) | (dvis << 18) | (sec_extn << 10) |
             (s->lpi_enable << GICD_TYPER_LPIS_SHIFT) |
             (0xf << 19) | itlinesnumber;
         return true;
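
For reference, the GICD_TYPER bit positions used in the value assembled above (per the GICv3/v4 architecture specification):

    /* GICD_TYPER fields set here:
     *   bit 25       No1N   = 1   (1-of-N SPI routing not supported)
     *   bit 24       A3V    = 1   (non-zero Affinity level 3 supported)
     *   bits [23:19] IDbits = 0xf (16-bit interrupt identifiers)
     *   bit 18       DVIS   = 1 only for revision >= 4
     *   bit 17       LPIS   = lpi_enable (GICD_TYPER_LPIS_SHIFT)
     *   bit 10       SecurityExtn = !GICD_CTLR.DS
     *   bits [4:0]   ITLinesNumber
     */
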
@@ -557,7 +558,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
     }
     case GICD_IDREGS ... GICD_IDREGS + 0x2f:
         /* ID registers */
-        *data = gicv3_idreg(offset - GICD_IDREGS);
+        *data = gicv3_idreg(s, offset - GICD_IDREGS, GICV3_PIDR0_DIST);
         return true;
     case GICD_SGIR:
         /* WO registers, return unknown value */

[diff for one file suppressed because it is too large]
@@ -106,6 +106,8 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
     kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                             KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0);
 
+    gicv3_add_its(s->gicv3, dev);
+
     gicv3_its_init_mmio(s, NULL, NULL);
 
     if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
@@ -781,6 +781,11 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (s->revision != 3) {
+        error_setg(errp, "unsupported GIC revision %d for in-kernel GIC",
+                   s->revision);
+    }
+
     if (s->security_extn) {
         error_setg(errp, "the in-kernel VGICv3 does not implement the "
                    "security extensions");
@@ -60,6 +60,132 @@ static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
     return reg;
 }
 
+static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr)
+{
+    /*
+     * Return true if a vCPU is resident, which is defined by
+     * whether the GICR_VPENDBASER register is marked VALID and
+     * has the right virtual pending table address.
+     */
+    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
+        return false;
+    }
+    return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK);
+}
+
+/**
+ * update_for_one_lpi: Update pending information if this LPI is better
+ *
+ * @cs: GICv3CPUState
+ * @irq: interrupt to look up in the LPI Configuration table
+ * @ctbase: physical address of the LPI Configuration table to use
+ * @ds: true if priority value should not be shifted
+ * @hpp: points to pending information to update
+ *
+ * Look up @irq in the Configuration table specified by @ctbase
+ * to see if it is enabled and what its priority is. If it is an
+ * enabled interrupt with a higher priority than that currently
+ * recorded in @hpp, update @hpp.
+ */
+static void update_for_one_lpi(GICv3CPUState *cs, int irq,
+                               uint64_t ctbase, bool ds, PendingIrq *hpp)
+{
+    uint8_t lpite;
+    uint8_t prio;
+
+    address_space_read(&cs->gic->dma_as,
+                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
+                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));
+
+    if (!(lpite & LPI_CTE_ENABLED)) {
+        return;
+    }
+
+    if (ds) {
+        prio = lpite & LPI_PRIORITY_MASK;
+    } else {
+        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
+    }
+
+    if ((prio < hpp->prio) ||
+        ((prio == hpp->prio) && (irq <= hpp->irq))) {
+        hpp->irq = irq;
+        hpp->prio = prio;
+        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
+        hpp->grp = GICV3_G1NS;
+    }
+}
+
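
The @ds flag mirrors GICD_CTLR.DS: when security is enabled (DS clear), the non-secure view squashes LPI priorities into the 0x80..0xFF half of the range. A worked example with an assumed table entry value:

    /* Assume lpite = 0xa1: enabled (bit 0 set), priority field 0xa0 */
    uint8_t lpite = 0xa1;
    uint8_t ds_prio = lpite & 0xfc;                 /* 0xa0 */
    uint8_t ns_prio = ((lpite & 0xfc) >> 1) | 0x80; /* 0x50 | 0x80 == 0xd0 */
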
+/**
+ * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
+ *
+ * @cs: GICv3CPUState
+ * @ptbase: physical address of LPI Pending table
+ * @ctbase: physical address of LPI Configuration table
+ * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
+ * @ds: true if priority value should not be shifted
+ * @hpp: points to pending information to set
+ *
+ * Recalculate the highest priority pending enabled LPI from scratch,
+ * and set @hpp accordingly.
+ *
+ * We scan the LPI pending table @ptbase; for each pending LPI, we read the
+ * corresponding entry in the LPI configuration table @ctbase to extract
+ * the priority and enabled information.
+ *
+ * We take @ptsizebits in the form idbits-1 because this is the way that
+ * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
+ * and in the VMAPP command's VPT_size field.
+ */
+static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
+                                uint64_t ctbase, unsigned ptsizebits,
+                                bool ds, PendingIrq *hpp)
+{
+    AddressSpace *as = &cs->gic->dma_as;
+    uint8_t pend;
+    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
+    int i, bit;
+
+    hpp->prio = 0xff;
+
+    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
+        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
+        while (pend) {
+            bit = ctz32(pend);
+            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
+            pend &= ~(1 << bit);
+        }
+    }
+}
+
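
Each byte of the pending table covers eight INTIDs, so a set bit b in byte i corresponds to interrupt i*8+b; scanning starts at byte GICV3_LPI_INTID_START/8 because LPIs begin at INTID 8192. For instance:

    /* Byte 1024 of the table covers INTIDs 8192..8199 */
    int i = 8192 / 8;                /* 1024 */
    uint8_t pend = 0x24;             /* bits 2 and 5 set */
    int first = i * 8 + ctz32(pend); /* 8192 + 2 == 8194 */
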
+/**
+ * set_pending_table_bit: Set or clear pending bit for an LPI
+ *
+ * @cs: GICv3CPUState
+ * @ptbase: physical address of LPI Pending table
+ * @irq: LPI to change pending state for
+ * @level: false to clear pending state, true to set
+ *
+ * Returns true if we needed to do something, false if the pending bit
+ * was already at @level.
+ */
+static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
+                                  int irq, bool level)
+{
+    AddressSpace *as = &cs->gic->dma_as;
+    uint64_t addr = ptbase + irq / 8;
+    uint8_t pend;
+
+    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
+    if (extract32(pend, irq % 8, 1) == level) {
+        /* Bit already at requested state, no action required */
+        return false;
+    }
+    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
+    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
+    return true;
+}
+
 static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                     int irq)
 {
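
The boolean return lets callers skip any recomputation when nothing actually changed; a condensed sketch of the calling pattern used later in this file:

    if (set_pending_table_bit(cs, ptbase, irq, level)) {
        /* pending state really changed: recompute the cached best LPI */
        gicv3_redist_update_lpi(cs);
    }
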
@@ -100,6 +226,87 @@ static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
     cs->gicr_ipriorityr[irq] = value;
 }
 
+static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
+{
+    uint64_t ptbase, ctbase, idbits;
+
+    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
+        cs->hppvlpi.prio = 0xff;
+        return;
+    }
+
+    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
+    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
+    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);
+
+    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
+}
+
+static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
+{
+    gicv3_redist_update_vlpi_only(cs);
+    gicv3_cpuif_virt_irq_fiq_update(cs);
+}
+
+static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
+{
+    /* Write @newval to GICR_VPENDBASER, handling its effects */
+    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
+    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
+    bool pendinglast;
+
+    /*
+     * The DIRTY bit is read-only and for us is always zero;
+     * other fields are writeable.
+     */
+    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
+        R_GICR_VPENDBASER_SHAREABILITY_MASK |
+        R_GICR_VPENDBASER_PHYADDR_MASK |
+        R_GICR_VPENDBASER_OUTERCACHE_MASK |
+        R_GICR_VPENDBASER_PENDINGLAST_MASK |
+        R_GICR_VPENDBASER_IDAI_MASK |
+        R_GICR_VPENDBASER_VALID_MASK;
+
+    if (oldvalid && newvalid) {
+        /*
+         * Changing other fields while VALID is 1 is UNPREDICTABLE;
+         * we choose to log and ignore the write.
+         */
+        if (cs->gicr_vpendbaser ^ newval) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: Changing GICR_VPENDBASER when VALID=1 "
+                          "is UNPREDICTABLE\n", __func__);
+        }
+        return;
+    }
+    if (!oldvalid && !newvalid) {
+        cs->gicr_vpendbaser = newval;
+        return;
+    }
+
+    if (newvalid) {
+        /*
+         * Valid going from 0 to 1: update hppvlpi from tables.
+         * If IDAI is 0 we are allowed to use the info we cached in
+         * the IMPDEF area of the table.
+         * PendingLast is RES1 when we make this transition.
+         */
+        pendinglast = true;
+    } else {
+        /*
+         * Valid going from 1 to 0:
+         * Set PendingLast if there was a pending enabled interrupt
+         * for the vPE that was just descheduled.
+         * If we cache info in the IMPDEF area, write it out here.
+         */
+        pendinglast = cs->hppvlpi.prio != 0xff;
+    }
+
+    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
+    cs->gicr_vpendbaser = newval;
+    gicv3_redist_update_vlpi(cs);
+}
+
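
Summarising the four VALID transitions handled above:

    /*
     * old VALID  new VALID  action
     *     1          1      UNPREDICTABLE if other fields change: log & ignore
     *     0          0      plain register update, no side effects
     *     0          1      vPE scheduled: rescan tables; PendingLast reads 1
     *     1          0      vPE descheduled: PendingLast := "pending vLPI left"
     */
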
 static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
 {
@@ -234,7 +441,24 @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
         *data = cs->gicr_nsacr;
         return MEMTX_OK;
     case GICR_IDREGS ... GICR_IDREGS + 0x2f:
-        *data = gicv3_idreg(offset - GICR_IDREGS);
+        *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
         return MEMTX_OK;
+    /*
+     * VLPI frame registers. We don't need a version check for
+     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
+     * prevent pre-v4 GIC from passing us offsets this high.
+     */
+    case GICR_VPROPBASER:
+        *data = extract64(cs->gicr_vpropbaser, 0, 32);
+        return MEMTX_OK;
+    case GICR_VPROPBASER + 4:
+        *data = extract64(cs->gicr_vpropbaser, 32, 32);
+        return MEMTX_OK;
+    case GICR_VPENDBASER:
+        *data = extract64(cs->gicr_vpendbaser, 0, 32);
+        return MEMTX_OK;
+    case GICR_VPENDBASER + 4:
+        *data = extract64(cs->gicr_vpendbaser, 32, 32);
+        return MEMTX_OK;
     default:
         return MEMTX_ERROR;
@@ -379,6 +603,23 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                       "%s: invalid guest write to RO register at offset "
                       TARGET_FMT_plx "\n", __func__, offset);
         return MEMTX_OK;
+    /*
+     * VLPI frame registers. We don't need a version check for
+     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
+     * prevent pre-v4 GIC from passing us offsets this high.
+     */
+    case GICR_VPROPBASER:
+        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
+        return MEMTX_OK;
+    case GICR_VPROPBASER + 4:
+        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
+        return MEMTX_OK;
+    case GICR_VPENDBASER:
+        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
+        return MEMTX_OK;
+    case GICR_VPENDBASER + 4:
+        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
+        return MEMTX_OK;
     default:
         return MEMTX_ERROR;
@@ -397,6 +638,17 @@ static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
     case GICR_PENDBASER:
         *data = cs->gicr_pendbaser;
         return MEMTX_OK;
+    /*
+     * VLPI frame registers. We don't need a version check for
+     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
+     * prevent pre-v4 GIC from passing us offsets this high.
+     */
+    case GICR_VPROPBASER:
+        *data = cs->gicr_vpropbaser;
+        return MEMTX_OK;
+    case GICR_VPENDBASER:
+        *data = cs->gicr_vpendbaser;
+        return MEMTX_OK;
     default:
         return MEMTX_ERROR;
     }
@@ -418,6 +670,17 @@ static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                       "%s: invalid guest write to RO register at offset "
                       TARGET_FMT_plx "\n", __func__, offset);
         return MEMTX_OK;
+    /*
+     * VLPI frame registers. We don't need a version check for
+     * VPROPBASER and VPENDBASER because gicv3_redist_size() will
+     * prevent pre-v4 GIC from passing us offsets this high.
+     */
+    case GICR_VPROPBASER:
+        cs->gicr_vpropbaser = value;
+        return MEMTX_OK;
+    case GICR_VPENDBASER:
+        gicr_write_vpendbaser(cs, value);
+        return MEMTX_OK;
     default:
         return MEMTX_ERROR;
     }
@@ -442,8 +705,8 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
      * in the memory map); if so then the GIC has multiple MemoryRegions
      * for the redistributors.
      */
-    cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE;
-    offset %= GICV3_REDIST_SIZE;
+    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
+    offset %= gicv3_redist_size(s);
 
     cs = &s->cpu[cpuidx];
 
@@ -501,8 +764,8 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
      * in the memory map); if so then the GIC has multiple MemoryRegions
      * for the redistributors.
      */
-    cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE;
-    offset %= GICV3_REDIST_SIZE;
+    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
+    offset %= gicv3_redist_size(s);
 
     cs = &s->cpu[cpuidx];
 
@@ -542,34 +805,11 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
 
 static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
 {
-    AddressSpace *as = &cs->gic->dma_as;
-    uint64_t lpict_baddr;
-    uint8_t lpite;
-    uint8_t prio;
-
-    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;
-
-    address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) *
-                       sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite,
-                       sizeof(lpite));
-
-    if (!(lpite & LPI_CTE_ENABLED)) {
-        return;
-    }
-
-    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
-        prio = lpite & LPI_PRIORITY_MASK;
-    } else {
-        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
-    }
-
-    if ((prio < cs->hpplpi.prio) ||
-        ((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) {
-        cs->hpplpi.irq = irq;
-        cs->hpplpi.prio = prio;
-        /* LPIs are always non-secure Grp1 interrupts */
-        cs->hpplpi.grp = GICV3_G1NS;
-    }
+    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;
+
+    update_for_one_lpi(cs, irq, lpict_baddr,
+                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
+                       &cs->hpplpi);
 }
 
 void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
@@ -581,11 +821,7 @@ void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
      * priority is lower than the last computed high priority lpi interrupt.
      * If yes, replace current LPI as the new high priority lpi interrupt.
      */
-    AddressSpace *as = &cs->gic->dma_as;
-    uint64_t lpipt_baddr;
-    uint32_t pendt_size = 0;
-    uint8_t pend;
-    int i, bit;
+    uint64_t lpipt_baddr, lpict_baddr;
     uint64_t idbits;
 
     idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
@@ -595,23 +831,11 @@ void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
         return;
     }
 
-    cs->hpplpi.prio = 0xff;
-
     lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;
 
-    /* Determine the highest priority pending interrupt among LPIs */
-    pendt_size = (1ULL << (idbits + 1));
-
-    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
-        address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend,
-                           sizeof(pend));
-
-        while (pend) {
-            bit = ctz32(pend);
-            gicv3_redist_check_lpi_priority(cs, i * 8 + bit);
-            pend &= ~(1 << bit);
-        }
-    }
+    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
+                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
 }
 
 void gicv3_redist_update_lpi(GICv3CPUState *cs)
@@ -626,30 +850,13 @@ void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
      * This function updates the pending bit in lpi pending table for
      * the irq being activated or deactivated.
      */
-    AddressSpace *as = &cs->gic->dma_as;
     uint64_t lpipt_baddr;
-    bool ispend = false;
-    uint8_t pend;
 
-    /*
-     * get the bit value corresponding to this irq in the
-     * lpi pending table
-     */
     lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
 
-    address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
-                       MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
-
-    ispend = extract32(pend, irq % 8, 1);
-
-    /* no change in the value of pending bit, return */
-    if (ispend == level) {
+    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
+        /* no change in the value of pending bit, return */
         return;
     }
-    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
-
-    address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
-                        MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
 
     /*
      * check if this LPI is better than the current hpplpi, if yes
@@ -681,6 +888,17 @@ void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
     gicv3_redist_lpi_pending(cs, irq, level);
 }
 
+void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
+{
+    /*
+     * The only cached information for LPIs we have is the HPPLPI.
+     * We could be cleverer about identifying when we don't need
+     * to do a full rescan of the pending table, but until we find
+     * this is a performance issue, just always recalculate.
+     */
+    gicv3_redist_update_lpi(cs);
+}
+
 void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
 {
     /*
|
@ -691,11 +909,9 @@ void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
|
|||
* we choose to NOP. If LPIs are disabled on source there's nothing
|
||||
* to be transferred anyway.
|
||||
*/
|
||||
AddressSpace *as = &src->gic->dma_as;
|
||||
uint64_t idbits;
|
||||
uint32_t pendt_size;
|
||||
uint64_t src_baddr;
|
||||
uint8_t src_pend;
|
||||
|
||||
if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
|
||||
!(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
|
||||
|
@@ -714,15 +930,10 @@ void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
 
     src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
 
-    address_space_read(as, src_baddr + (irq / 8),
-                       MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
-    if (!extract32(src_pend, irq % 8, 1)) {
+    if (!set_pending_table_bit(src, src_baddr, irq, 0)) {
         /* Not pending on source, nothing to do */
         return;
     }
-    src_pend &= ~(1 << (irq % 8));
-    address_space_write(as, src_baddr + (irq / 8),
-                        MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
     if (irq == src->hpplpi.irq) {
         /*
          * We just made this LPI not-pending so only need to update
@@ -788,6 +999,117 @@ void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
     gicv3_redist_update_lpi(dest);
 }
 
+void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
+{
+    /*
+     * Change the pending state of the specified vLPI.
+     * Unlike gicv3_redist_process_vlpi(), we know here that the
+     * vCPU is definitely resident on this redistributor, and that
+     * the irq is in range.
+     */
+    uint64_t vptbase, ctbase;
+
+    vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16;
+
+    if (set_pending_table_bit(cs, vptbase, irq, level)) {
+        if (level) {
+            /* Check whether this vLPI is now the best */
+            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
+            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
+            gicv3_cpuif_virt_irq_fiq_update(cs);
+        } else {
+            /* Only need to recalculate if this was previously the best vLPI */
+            if (irq == cs->hppvlpi.irq) {
+                gicv3_redist_update_vlpi(cs);
+            }
+        }
+    }
+}
+
+void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
+                               int doorbell, int level)
+{
+    bool bit_changed;
+    bool resident = vcpu_resident(cs, vptaddr);
+    uint64_t ctbase;
+
+    if (resident) {
+        uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);
+        if (irq >= (1ULL << (idbits + 1))) {
+            return;
+        }
+    }
+
+    bit_changed = set_pending_table_bit(cs, vptaddr, irq, level);
+    if (resident && bit_changed) {
+        if (level) {
+            /* Check whether this vLPI is now the best */
+            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
+            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
+            gicv3_cpuif_virt_irq_fiq_update(cs);
+        } else {
+            /* Only need to recalculate if this was previously the best vLPI */
+            if (irq == cs->hppvlpi.irq) {
+                gicv3_redist_update_vlpi(cs);
+            }
+        }
+    }
+
+    if (!resident && level && doorbell != INTID_SPURIOUS &&
+        (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
+        /* vCPU is not currently resident: ring the doorbell */
+        gicv3_redist_process_lpi(cs, doorbell, 1);
+    }
+}
+
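
The doorbell is the GICv4.0 mechanism for waking a descheduled vPE: when the target vCPU is not resident, the vLPI is only recorded in its virtual pending table, and a plain physical LPI (the doorbell INTID supplied by the ITS mapping) is raised instead, so the hypervisor learns that it should schedule that vPE. A doorbell value of INTID_SPURIOUS (1023) means no doorbell is configured.
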
+void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
+                           GICv3CPUState *dest, uint64_t dest_vptaddr,
+                           int irq, int doorbell)
+{
+    /*
+     * Move the specified vLPI's pending state from the source redistributor
+     * to the destination.
+     */
+    if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) {
+        /* Not pending on source, nothing to do */
+        return;
+    }
+    if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) {
+        /*
+         * Update src's cached highest-priority pending vLPI if we just made
+         * it not-pending
+         */
+        gicv3_redist_update_vlpi(src);
+    }
+    /*
+     * Mark the vLPI pending on the destination (ringing the doorbell
+     * if the vCPU isn't resident)
+     */
+    gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1);
+}
+
+void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
+{
+    if (!vcpu_resident(cs, vptaddr)) {
+        /* We don't have anything cached if the vCPU isn't resident */
+        return;
+    }
+
+    /* Otherwise, our only cached information is the HPPVLPI info */
+    gicv3_redist_update_vlpi(cs);
+}
+
+void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
+{
+    /*
+     * The only cached information for LPIs we have is the HPPLPI.
+     * We could be cleverer about identifying when we don't need
+     * to do a full rescan of the pending table, but until we find
+     * this is a performance issue, just always recalculate.
+     */
+    gicv3_redist_vinvall(cs, vptaddr);
+}
+
 void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
 {
     /* Update redistributor state for a change in an external PPI input line */
@@ -77,6 +77,7 @@
  * Redistributor frame offsets from RD_base
  */
 #define GICR_SGI_OFFSET 0x10000
+#define GICR_VLPI_OFFSET 0x20000
 
 /*
  * Redistributor registers, offsets from RD_base
@@ -109,6 +110,10 @@
 #define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00)
 #define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00)
 
+/* VLPI redistributor registers, offsets from VLPI_base */
+#define GICR_VPROPBASER (GICR_VLPI_OFFSET + 0x70)
+#define GICR_VPENDBASER (GICR_VLPI_OFFSET + 0x78)
+
 #define GICR_CTLR_ENABLE_LPIS (1U << 0)
 #define GICR_CTLR_CES (1U << 1)
 #define GICR_CTLR_RWP (1U << 3)
@@ -143,6 +148,22 @@ FIELD(GICR_PENDBASER, PTZ, 62, 1)
 
 #define GICR_PROPBASER_IDBITS_THRESHOLD 0xd
 
+/* These are the GICv4 VPROPBASER and VPENDBASER layouts; v4.1 is different */
+FIELD(GICR_VPROPBASER, IDBITS, 0, 5)
+FIELD(GICR_VPROPBASER, INNERCACHE, 7, 3)
+FIELD(GICR_VPROPBASER, SHAREABILITY, 10, 2)
+FIELD(GICR_VPROPBASER, PHYADDR, 12, 40)
+FIELD(GICR_VPROPBASER, OUTERCACHE, 56, 3)
+
+FIELD(GICR_VPENDBASER, INNERCACHE, 7, 3)
+FIELD(GICR_VPENDBASER, SHAREABILITY, 10, 2)
+FIELD(GICR_VPENDBASER, PHYADDR, 16, 36)
+FIELD(GICR_VPENDBASER, OUTERCACHE, 56, 3)
+FIELD(GICR_VPENDBASER, DIRTY, 60, 1)
+FIELD(GICR_VPENDBASER, PENDINGLAST, 61, 1)
+FIELD(GICR_VPENDBASER, IDAI, 62, 1)
+FIELD(GICR_VPENDBASER, VALID, 63, 1)
+
 #define ICC_CTLR_EL1_CBPR (1U << 0)
 #define ICC_CTLR_EL1_EOIMODE (1U << 1)
 #define ICC_CTLR_EL1_PMHE (1U << 6)
@@ -280,6 +301,7 @@ FIELD(GITS_CTLR, ENABLED, 0, 1)
 FIELD(GITS_CTLR, QUIESCENT, 31, 1)
 
 FIELD(GITS_TYPER, PHYSICAL, 0, 1)
+FIELD(GITS_TYPER, VIRTUAL, 1, 1)
 FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
 FIELD(GITS_TYPER, IDBITS, 8, 5)
 FIELD(GITS_TYPER, DEVBITS, 13, 5)
@ -287,6 +309,7 @@ FIELD(GITS_TYPER, SEIS, 18, 1)
|
|||
FIELD(GITS_TYPER, PTA, 19, 1)
|
||||
FIELD(GITS_TYPER, CIDBITS, 32, 4)
|
||||
FIELD(GITS_TYPER, CIL, 36, 1)
|
||||
FIELD(GITS_TYPER, VMOVP, 37, 1)
|
||||
|
||||
#define GITS_IDREGS 0xFFD0
|
||||
|
||||
|
@ -298,6 +321,7 @@ FIELD(GITS_TYPER, CIL, 36, 1)
|
|||
#define GITS_BASER_PAGESIZE_64K 2
|
||||
|
||||
#define GITS_BASER_TYPE_DEVICE 1ULL
|
||||
#define GITS_BASER_TYPE_VPE 2ULL
|
||||
#define GITS_BASER_TYPE_COLLECTION 4ULL
|
||||
|
||||
#define GITS_PAGE_SIZE_4K 0x1000
|
||||
|
@ -327,6 +351,13 @@ FIELD(GITS_TYPER, CIL, 36, 1)
|
|||
#define GITS_CMD_INVALL 0x0D
|
||||
#define GITS_CMD_MOVALL 0x0E
|
||||
#define GITS_CMD_DISCARD 0x0F
|
||||
#define GITS_CMD_VMOVI 0x21
|
||||
#define GITS_CMD_VMOVP 0x22
|
||||
#define GITS_CMD_VSYNC 0x25
|
||||
#define GITS_CMD_VMAPP 0x29
|
||||
#define GITS_CMD_VMAPTI 0x2A
|
||||
#define GITS_CMD_VMAPI 0x2B
|
||||
#define GITS_CMD_VINVALL 0x2D
|
||||
|
||||
/* MAPC command fields */
|
||||
#define ICID_LENGTH 16
|
||||
|
@ -366,6 +397,46 @@ FIELD(MOVI_0, DEVICEID, 32, 32)
|
|||
FIELD(MOVI_1, EVENTID, 0, 32)
|
||||
FIELD(MOVI_2, ICID, 0, 16)
|
||||
|
||||
/* INV command fields */
|
||||
FIELD(INV_0, DEVICEID, 32, 32)
|
||||
FIELD(INV_1, EVENTID, 0, 32)
|
||||
|
||||
/* VMAPI, VMAPTI command fields */
|
||||
FIELD(VMAPTI_0, DEVICEID, 32, 32)
|
||||
FIELD(VMAPTI_1, EVENTID, 0, 32)
|
||||
FIELD(VMAPTI_1, VPEID, 32, 16)
|
||||
FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */
|
||||
FIELD(VMAPTI_2, DOORBELL, 32, 32)
|
||||
|
||||
/* VMAPP command fields */
|
||||
FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */
|
||||
FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */
|
||||
FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */
|
||||
FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */
|
||||
FIELD(VMAPP_1, VPEID, 32, 16)
|
||||
FIELD(VMAPP_2, RDBASE, 16, 36)
|
||||
FIELD(VMAPP_2, V, 63, 1)
|
||||
FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */
|
||||
FIELD(VMAPP_3, VPTADDR, 16, 36)
|
||||
|
||||
/* VMOVP command fields */
|
||||
FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */
|
||||
FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */
|
||||
FIELD(VMOVP_1, VPEID, 32, 16)
|
||||
FIELD(VMOVP_2, RDBASE, 16, 36)
|
||||
FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */
|
||||
FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */
|
||||
|
||||
/* VMOVI command fields */
|
||||
FIELD(VMOVI_0, DEVICEID, 32, 32)
|
||||
FIELD(VMOVI_1, EVENTID, 0, 32)
|
||||
FIELD(VMOVI_1, VPEID, 32, 16)
|
||||
FIELD(VMOVI_2, D, 0, 1)
|
||||
FIELD(VMOVI_2, DOORBELL, 32, 32)
|
||||
|
||||
/* VINVALL command fields */
|
||||
FIELD(VINVALL_1, VPEID, 32, 16)
|
||||
|
||||
/*
|
||||
* 12 bytes Interrupt translation Table Entry size
|
||||
* as per Table 5.3 in GICv3 spec
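
The FIELD() definitions above give the bit layout of each doubleword of the new GICv4 commands. As a worked illustration, decoding a VMAPP packet by hand (a sketch only: ex64() stands in for QEMU's FIELD_EX64() macro, and cmdpkt[] is assumed to hold the command's four doublewords):

#include <stdint.h>

static inline uint64_t ex64(uint64_t word, unsigned shift, unsigned len)
{
    return (word >> shift) & ((1ULL << len) - 1);
}

static void decode_vmapp(const uint64_t cmdpkt[4])
{
    uint64_t vpeid   = ex64(cmdpkt[1], 32, 16); /* VMAPP_1, VPEID */
    uint64_t rdbase  = ex64(cmdpkt[2], 16, 36); /* VMAPP_2, RDBASE */
    uint64_t valid   = ex64(cmdpkt[2], 63, 1);  /* VMAPP_2, V */
    uint64_t vptsize = ex64(cmdpkt[3], 0, 8);   /* VMAPP_3, VPTSIZE */
    uint64_t vptaddr = ex64(cmdpkt[3], 16, 36); /* VMAPP_3, VPTADDR */

    /* a real implementation would validate these and update the vPE table */
    (void)vpeid; (void)rdbase; (void)valid; (void)vptsize; (void)vptaddr;
}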

@@ -419,6 +490,20 @@ FIELD(DTE, ITTADDR, 6, 44)
FIELD(CTE, VALID, 0, 1)
FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)

+/*
+ * 8 bytes VPE table entry size:
+ * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits
+ *
+ * Field sizes for Valid and size are mandated; field sizes for RDbase
+ * and VPT_addr are IMPDEF.
+ */
+#define GITS_VPE_SIZE 0x8ULL
+
+FIELD(VTE, VALID, 0, 1)
+FIELD(VTE, VPTSIZE, 1, 5)
+FIELD(VTE, VPTADDR, 6, 36)
+FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)
+
/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
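
Packing a valid vPE table entry per the VTE layout just defined, as a standalone sketch (QEMU itself builds these with FIELD_DP64(); RDBASE_PROCNUM_LENGTH is 16 bits in this implementation, per the comment above):

#include <stdint.h>

static uint64_t make_vte(uint64_t vptsize, uint64_t vptaddr, uint64_t rdbase)
{
    uint64_t vte = 0;

    vte |= 1ULL << 0;                           /* VALID */
    vte |= (vptsize & 0x1f) << 1;               /* VPTSIZE, 5 bits */
    vte |= (vptaddr & ((1ULL << 36) - 1)) << 6; /* VPTADDR, 36 bits */
    vte |= (rdbase & 0xffff) << 42;             /* RDBASE, 16 bits */
    return vte;
}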

@@ -426,6 +511,27 @@ FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)

/* Functions internal to the emulated GICv3 */

+/**
+ * gicv3_redist_size:
+ * @s: GICv3State
+ *
+ * Return the size of the redistributor register frame in bytes
+ * (which depends on what GIC version this is)
+ */
+static inline int gicv3_redist_size(GICv3State *s)
+{
+    /*
+     * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS.
+     * It's the same for every redistributor in the GIC, so arbitrarily
+     * use the register field in the first one.
+     */
+    if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) {
+        return GICV4_REDIST_SIZE;
+    } else {
+        return GICV3_REDIST_SIZE;
+    }
+}
+
/**
 * gicv3_intid_is_special:
 * @intid: interrupt ID
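
A sketch of the address arithmetic that the gicv3_redist_size() selection above feeds: given an offset into a contiguous redistributor region, the owning CPU and the offset within its frames fall out of a divide and a modulo by the per-CPU frame size (standalone model; QEMU's MMIO dispatch does the equivalent internally):

#include <stdint.h>

#define GICV3_REDIST_SIZE 0x20000
#define GICV4_REDIST_SIZE 0x40000

static void locate_redist(uint64_t offset, int gicv4,
                          unsigned *cpuidx, uint64_t *frame_offset)
{
    uint64_t size = gicv4 ? GICV4_REDIST_SIZE : GICV3_REDIST_SIZE;

    *cpuidx = offset / size;       /* which CPU's redistributor */
    *frame_offset = offset % size; /* offset within that CPU's frames */
}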

@@ -490,6 +596,36 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
+/**
+ * gicv3_redist_process_vlpi:
+ * @cs: GICv3CPUState
+ * @irq: (virtual) interrupt number
+ * @vptaddr: (guest) address of VLPI table
+ * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell")
+ * @level: level to set @irq to
+ *
+ * Process a virtual LPI being directly injected by the ITS. This function
+ * will update the VLPI table specified by @vptaddr. If the
+ * vCPU corresponding to that VLPI table is currently running on
+ * the CPU associated with this redistributor, directly inject the VLPI
+ * @irq. If the vCPU is not running on this CPU, raise the doorbell
+ * interrupt instead.
+ */
+void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
+                               int doorbell, int level);
+/**
+ * gicv3_redist_vlpi_pending:
+ * @cs: GICv3CPUState
+ * @irq: (virtual) interrupt number
+ * @level: level to set @irq to
+ *
+ * Set/clear the pending status of a virtual LPI in the vLPI table
+ * that this redistributor is currently using. (The difference between
+ * this and gicv3_redist_process_vlpi() is that this is called from
+ * the cpuif and does not need to do the not-running-on-this-vcpu checks.)
+ */
+void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level);
+
void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
/**
 * gicv3_redist_update_lpi:
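
The contract documented for gicv3_redist_process_vlpi() boils down to a three-way decision. A toy restatement (illustrative only; 1023 is the "no doorbell" encoding documented above, and the real function also updates the pending table and the cached HPPVLPI state):

#include <stdbool.h>

enum vlpi_action { INJECT_VLPI, RING_DOORBELL, DO_NOTHING };

static enum vlpi_action vlpi_action(bool vcpu_is_resident, int doorbell)
{
    if (vcpu_is_resident) {
        return INJECT_VLPI;   /* vCPU on this redistributor: inject */
    }
    if (doorbell != 1023) {
        return RING_DOORBELL; /* tell the hypervisor to schedule the vCPU */
    }
    return DO_NOTHING;        /* no doorbell configured for this vLPI */
}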

@@ -509,6 +645,23 @@ void gicv3_redist_update_lpi(GICv3CPUState *cs);
 * an incoming migration has loaded new state.
 */
void gicv3_redist_update_lpi_only(GICv3CPUState *cs);
+/**
+ * gicv3_redist_inv_lpi:
+ * @cs: GICv3CPUState
+ * @irq: LPI to invalidate cached information for
+ *
+ * Forget or update any cached information associated with this LPI.
+ */
+void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq);
+/**
+ * gicv3_redist_inv_vlpi:
+ * @cs: GICv3CPUState
+ * @irq: vLPI to invalidate cached information for
+ * @vptaddr: (guest) address of vLPI table
+ *
+ * Forget or update any cached information associated with this vLPI.
+ */
+void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr);
/**
 * gicv3_redist_mov_lpi:
 * @src: source redistributor

@@ -529,6 +682,30 @@ void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq);
 * by the ITS MOVALL command.
 */
void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest);
+/**
+ * gicv3_redist_mov_vlpi:
+ * @src: source redistributor
+ * @src_vptaddr: (guest) address of source VLPI table
+ * @dest: destination redistributor
+ * @dest_vptaddr: (guest) address of destination VLPI table
+ * @irq: VLPI to update
+ * @doorbell: doorbell for destination (1023 for "no doorbell")
+ *
+ * Move the pending state of the specified VLPI from @src to @dest,
+ * as required by the ITS VMOVI command.
+ */
+void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
+                           GICv3CPUState *dest, uint64_t dest_vptaddr,
+                           int irq, int doorbell);
+/**
+ * gicv3_redist_vinvall:
+ * @cs: GICv3CPUState
+ * @vptaddr: address of VLPI pending table
+ *
+ * On redistributor @cs, invalidate all cached information associated
+ * with the vCPU defined by @vptaddr.
+ */
+void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr);

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);

@@ -544,6 +721,17 @@ void gicv3_init_cpuif(GICv3State *s);
 */
void gicv3_cpuif_update(GICv3CPUState *cs);

+/*
+ * gicv3_cpuif_virt_irq_fiq_update:
+ * @cs: GICv3CPUState for the CPU to update
+ *
+ * Recalculate whether to assert the virtual IRQ or FIQ lines after
+ * a change to the current highest priority pending virtual interrupt.
+ * Note that this does not recalculate and change the maintenance
+ * interrupt status (for that, see gicv3_cpuif_virt_update()).
+ */
+void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs);
+
static inline uint32_t gicv3_iidr(void)
{
    /* Return the Implementer Identification Register value

@@ -555,17 +743,34 @@ static inline uint32_t gicv3_iidr(void)
    return 0x43b;
}

-static inline uint32_t gicv3_idreg(int regoffset)
+/* CoreSight PIDR0 values for ARM GICv3 implementations */
+#define GICV3_PIDR0_DIST 0x92
+#define GICV3_PIDR0_REDIST 0x93
+#define GICV3_PIDR0_ITS 0x94
+
+static inline uint32_t gicv3_idreg(GICv3State *s, int regoffset, uint8_t pidr0)
{
    /* Return the value of the CoreSight ID register at the specified
     * offset from the first ID register (as found in the distributor
     * and redistributor register banks).
-     * These values indicate an ARM implementation of a GICv3.
+     * These values indicate an ARM implementation of a GICv3 or v4.
     */
    static const uint8_t gicd_ids[] = {
-        0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x3B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
+        0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x0B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
    };
-    return gicd_ids[regoffset / 4];
+    uint32_t id;
+
+    regoffset /= 4;
+
+    if (regoffset == 4) {
+        return pidr0;
+    }
+    id = gicd_ids[regoffset];
+    if (regoffset == 6) {
+        /* PIDR2 bits [7:4] are the GIC architecture revision */
+        id |= s->revision << 4;
+    }
+    return id;
}

/**

@@ -151,8 +151,9 @@ gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d rea
gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu 0x%x value 0x%" PRIx64
gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu 0x%x value 0x%" PRIx64
gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu 0x%x value 0x%" PRIx64
-gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d"
-gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d"
+gicv3_cpuif_virt_update(uint32_t cpuid, int idx, int hppvlpi, int grp, int prio) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d HPPVLPI %d grp %d prio %d"
+gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d"
+gicv3_cpuif_virt_set_maint_irq(uint32_t cpuid, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting maintenance-irq %d"

# arm_gicv3_dist.c
gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"

@@ -184,9 +185,17 @@ gicv3_its_cmd_mapd(uint32_t devid, uint32_t size, uint64_t ittaddr, int valid) "
gicv3_its_cmd_mapc(uint32_t icid, uint64_t rdbase, int valid) "GICv3 ITS: command MAPC ICID 0x%x RDbase 0x%" PRIx64 " V %d"
gicv3_its_cmd_mapi(uint32_t devid, uint32_t eventid, uint32_t icid) "GICv3 ITS: command MAPI DeviceID 0x%x EventID 0x%x ICID 0x%x"
gicv3_its_cmd_mapti(uint32_t devid, uint32_t eventid, uint32_t icid, uint32_t intid) "GICv3 ITS: command MAPTI DeviceID 0x%x EventID 0x%x ICID 0x%x pINTID 0x%x"
-gicv3_its_cmd_inv(void) "GICv3 ITS: command INV or INVALL"
+gicv3_its_cmd_inv(uint32_t devid, uint32_t eventid) "GICv3 ITS: command INV DeviceID 0x%x EventID 0x%x"
+gicv3_its_cmd_invall(void) "GICv3 ITS: command INVALL"
gicv3_its_cmd_movall(uint64_t rd1, uint64_t rd2) "GICv3 ITS: command MOVALL RDbase1 0x%" PRIx64 " RDbase2 0x%" PRIx64
gicv3_its_cmd_movi(uint32_t devid, uint32_t eventid, uint32_t icid) "GICv3 ITS: command MOVI DeviceID 0x%x EventID 0x%x ICID 0x%x"
+gicv3_its_cmd_vmapi(uint32_t devid, uint32_t eventid, uint32_t vpeid, uint32_t doorbell) "GICv3 ITS: command VMAPI DeviceID 0x%x EventID 0x%x vPEID 0x%x Dbell_pINTID 0x%x"
+gicv3_its_cmd_vmapti(uint32_t devid, uint32_t eventid, uint32_t vpeid, uint32_t vintid, uint32_t doorbell) "GICv3 ITS: command VMAPTI DeviceID 0x%x EventID 0x%x vPEID 0x%x vINTID 0x%x Dbell_pINTID 0x%x"
+gicv3_its_cmd_vmapp(uint32_t vpeid, uint64_t rdbase, int valid, uint64_t vptaddr, uint32_t vptsize) "GICv3 ITS: command VMAPP vPEID 0x%x RDbase 0x%" PRIx64 " V %d VPT_addr 0x%" PRIx64 " VPT_size 0x%x"
+gicv3_its_cmd_vmovp(uint32_t vpeid, uint64_t rdbase) "GICv3 ITS: command VMOVP vPEID 0x%x RDbase 0x%" PRIx64
+gicv3_its_cmd_vsync(void) "GICv3 ITS: command VSYNC"
+gicv3_its_cmd_vmovi(uint32_t devid, uint32_t eventid, uint32_t vpeid, int dbvalid, uint32_t doorbell) "GICv3 ITS: command VMOVI DeviceID 0x%x EventID 0x%x vPEID 0x%x D %d Dbell_pINTID 0x%x"
+gicv3_its_cmd_vinvall(uint32_t vpeid) "GICv3 ITS: command VINVALL vPEID 0x%x"
gicv3_its_cmd_unknown(unsigned cmd) "GICv3 ITS: unknown command 0x%x"
gicv3_its_cte_read(uint32_t icid, int valid, uint32_t rdbase) "GICv3 ITS: Collection Table read for ICID 0x%x: valid %d RDBase 0x%x"
gicv3_its_cte_write(uint32_t icid, int valid, uint32_t rdbase) "GICv3 ITS: Collection Table write for ICID 0x%x: valid %d RDBase 0x%x"

@@ -197,6 +206,9 @@ gicv3_its_ite_write(uint64_t ittaddr, uint32_t eventid, int valid, int inttype,
gicv3_its_dte_read(uint32_t devid, int valid, uint32_t size, uint64_t ittaddr) "GICv3 ITS: Device Table read for DeviceID 0x%x: valid %d size 0x%x ITTaddr 0x%" PRIx64
gicv3_its_dte_write(uint32_t devid, int valid, uint32_t size, uint64_t ittaddr) "GICv3 ITS: Device Table write for DeviceID 0x%x: valid %d size 0x%x ITTaddr 0x%" PRIx64
gicv3_its_dte_read_fault(uint32_t devid) "GICv3 ITS: Device Table read for DeviceID 0x%x: faulted"
+gicv3_its_vte_read(uint32_t vpeid, int valid, uint32_t vptsize, uint64_t vptaddr, uint32_t rdbase) "GICv3 ITS: vPE Table read for vPEID 0x%x: valid %d VPTsize 0x%x VPTaddr 0x%" PRIx64 " RDbase 0x%x"
+gicv3_its_vte_read_fault(uint32_t vpeid) "GICv3 ITS: vPE Table read for vPEID 0x%x: faulted"
+gicv3_its_vte_write(uint32_t vpeid, int valid, uint32_t vptsize, uint64_t vptaddr, uint32_t rdbase) "GICv3 ITS: vPE Table write for vPEID 0x%x: valid %d VPTsize 0x%x VPTaddr 0x%" PRIx64 " RDbase 0x%x"

# armv7m_nvic.c
nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d"

@@ -113,6 +113,7 @@ typedef enum VirtGICType {
    VIRT_GIC_VERSION_HOST,
    VIRT_GIC_VERSION_2,
    VIRT_GIC_VERSION_3,
+   VIRT_GIC_VERSION_4,
    VIRT_GIC_VERSION_NOSEL,
} VirtGICType;

@@ -185,13 +186,25 @@ OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE)
void virt_acpi_setup(VirtMachineState *vms);
bool virt_is_acpi_enabled(VirtMachineState *vms);

+/* Return number of redistributors that fit in the specified region */
+static uint32_t virt_redist_capacity(VirtMachineState *vms, int region)
+{
+    uint32_t redist_size;
+
+    if (vms->gic_version == VIRT_GIC_VERSION_3) {
+        redist_size = GICV3_REDIST_SIZE;
+    } else {
+        redist_size = GICV4_REDIST_SIZE;
+    }
+    return vms->memmap[region].size / redist_size;
+}
+
/* Return the number of used redistributor regions */
static inline int virt_gicv3_redist_region_count(VirtMachineState *vms)
{
-    uint32_t redist0_capacity =
-        vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
+    uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST);

-    assert(vms->gic_version == VIRT_GIC_VERSION_3);
+    assert(vms->gic_version != VIRT_GIC_VERSION_2);

    return (MACHINE(vms)->smp.cpus > redist0_capacity &&
            vms->highmem_redists) ? 2 : 1;
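
Worked numbers for the capacity calculation, assuming a 0xF60000-byte first redistributor region (the region size used here is an assumption about the virt board's memory map, not part of this diff); the halved GICv4 capacity is why the check has to be version-aware:

#include <assert.h>
#include <stdint.h>

#define GICV3_REDIST_SIZE 0x20000
#define GICV4_REDIST_SIZE 0x40000

int main(void)
{
    uint64_t region = 0xF60000; /* assumed size of the first redist region */

    assert(region / GICV3_REDIST_SIZE == 123); /* GICv3: 123 CPUs fit */
    assert(region / GICV4_REDIST_SIZE == 61);  /* GICv4: only 61 fit */
    return 0;
}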

@@ -38,7 +38,12 @@

#define GICV3_LPI_INTID_START 8192

+/*
+ * The redistributor in GICv3 has two 64KB frames per CPU; in
+ * GICv4 it has four 64KB frames per CPU.
+ */
#define GICV3_REDIST_SIZE 0x20000
+#define GICV4_REDIST_SIZE 0x40000

/* Number of SGI target-list bits */
#define GICV3_TARGETLIST_BITS 16
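
The frame math from the comment, written as a compile-time check one could add (a sketch using C11 _Static_assert, not something in this patch):

#define FRAME_64K 0x10000
_Static_assert(2 * FRAME_64K == 0x20000, "GICv3: two 64KB frames per CPU");
_Static_assert(4 * FRAME_64K == 0x40000, "GICv4: four 64KB frames per CPU");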

@@ -174,6 +179,9 @@ struct GICv3CPUState {
    uint32_t gicr_igrpmodr0;
    uint32_t gicr_nsacr;
    uint8_t gicr_ipriorityr[GIC_INTERNAL];
+   /* VLPI_base page registers */
+   uint64_t gicr_vpropbaser;
+   uint64_t gicr_vpendbaser;

    /* CPU interface */
    uint64_t icc_sre_el1;

@@ -211,6 +219,9 @@ struct GICv3CPUState {
     */
    PendingIrq hpplpi;

+   /* Cached information recalculated from vLPI tables in guest memory */
+   PendingIrq hppvlpi;
+
    /* This is temporary working state, to avoid a malloc in gicv3_update() */
    bool seenbetter;
};

@@ -272,6 +283,8 @@ struct GICv3State {
    uint32_t gicd_nsacr[DIV_ROUND_UP(GICV3_MAXIRQ, 16)];

    GICv3CPUState *cpu;
+   /* List of all ITSes connected to this GIC */
+   GPtrArray *itslist;
};

#define GICV3_BITMAP_ACCESSORS(BMP) \

@@ -78,6 +78,7 @@ struct GICv3ITSState {

    TableDesc dt;
    TableDesc ct;
+   TableDesc vpet;
    CmdQDesc cq;

    Error *migration_blocker;

@@ -88,6 +89,24 @@ typedef struct GICv3ITSState GICv3ITSState;
void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
                         const MemoryRegionOps *tops);

+/*
+ * The ITS should call this when it is realized to add itself
+ * to its GIC's list of connected ITSes.
+ */
+static inline void gicv3_add_its(GICv3State *s, DeviceState *its)
+{
+    g_ptr_array_add(s->itslist, its);
+}
+
+/*
+ * The ITS can use this for operations that must be performed on
+ * every ITS connected to the same GIC that it is
+ */
+static inline void gicv3_foreach_its(GICv3State *s, GFunc func, void *opaque)
+{
+    g_ptr_array_foreach(s->itslist, func, opaque);
+}
+
#define TYPE_ARM_GICV3_ITS_COMMON "arm-gicv3-its-common"
typedef struct GICv3ITSCommonClass GICv3ITSCommonClass;
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSCommonClass,
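
gicv3_foreach_its() takes a plain glib GFunc, so a caller inside the ITS would look roughly like this (the callback name and payload below are hypothetical, purely for illustration):

#include <glib.h>

/* Hypothetical GFunc callback: 'data' is each DeviceState registered
 * via gicv3_add_its(); 'opaque' is whatever the caller passed along. */
static void its_per_its_op(gpointer data, gpointer opaque)
{
    g_message("would run the shared operation on ITS %p (arg %p)",
              data, opaque);
}

/* usage, from code that has the GICv3State *s in scope:
 *     gicv3_foreach_its(s, its_per_its_op, &my_args);
 */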

@@ -230,7 +230,7 @@ do_kernel_trap(CPUARMState *env)
    /* Jump back to the caller. */
    addr = env->regs[14];
    if (addr & 1) {
-       env->thumb = 1;
+       env->thumb = true;
        addr &= ~1;
    }
    env->regs[15] = addr;

@@ -51,7 +51,7 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)

    if (is_a64(env)) {
        env->pc = value;
-       env->thumb = 0;
+       env->thumb = false;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;

@@ -189,7 +189,7 @@ static void arm_cpu_reset(DeviceState *dev)

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
-       env->aarch64 = 1;
+       env->aarch64 = true;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */

@@ -694,6 +694,16 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

+   if (!arm_feature(env, ARM_FEATURE_EL2) &&
+       (irq == ARM_CPU_VIRQ || irq == ARM_CPU_VFIQ)) {
+       /*
+        * The GIC might tell us about VIRQ and VFIQ state, but if we don't
+        * have EL2 support we don't care. (Unless the guest is doing something
+        * silly this will only be calls saying "level is still 0".)
+        */
+       return;
+   }
+
    if (level) {
        env->irq_line_state |= mask[irq];
    } else {

@@ -702,11 +712,9 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)

    switch (irq) {
    case ARM_CPU_VIRQ:
-       assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
-       assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
@@ -259,7 +259,8 @@ typedef struct CPUArchState {
     * all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
-   uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
+   bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */
+   bool thumb; /* True if CPU is in thumb mode; cpsr[5] */

    /* Cached TBFLAGS state. See below for which bits are included. */
    CPUARMTBFlags hflags;

@@ -286,7 +287,6 @@ typedef struct CPUArchState {
    uint32_t ZF; /* Z set if zero. */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
-   uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
    uint32_t btype; /* BTI branch type. spsr[11:10]. */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */

@@ -1233,6 +1233,20 @@ void pmu_init(ARMCPU *cpu);
#define SCTLR_ATA0 (1ULL << 42) /* v8.5-MemTag */
#define SCTLR_ATA (1ULL << 43) /* v8.5-MemTag */
#define SCTLR_DSSBS_64 (1ULL << 44) /* v8.5, AArch64 only */
+#define SCTLR_TWEDEn (1ULL << 45) /* FEAT_TWED */
+#define SCTLR_TWEDEL MAKE_64BIT_MASK(46, 4) /* FEAT_TWED */
+#define SCTLR_TMT0 (1ULL << 50) /* FEAT_TME */
+#define SCTLR_TMT (1ULL << 51) /* FEAT_TME */
+#define SCTLR_TME0 (1ULL << 52) /* FEAT_TME */
+#define SCTLR_TME (1ULL << 53) /* FEAT_TME */
+#define SCTLR_EnASR (1ULL << 54) /* FEAT_LS64_V */
+#define SCTLR_EnAS0 (1ULL << 55) /* FEAT_LS64_ACCDATA */
+#define SCTLR_EnALS (1ULL << 56) /* FEAT_LS64 */
+#define SCTLR_EPAN (1ULL << 57) /* FEAT_PAN3 */
+#define SCTLR_EnTP2 (1ULL << 60) /* FEAT_SME */
+#define SCTLR_NMI (1ULL << 61) /* FEAT_NMI */
+#define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */
+#define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */

#define CPTR_TCPAC (1U << 31)
#define CPTR_TTA (1U << 20)

@@ -1545,6 +1559,18 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define SCR_FIEN (1U << 21)
#define SCR_ENSCXT (1U << 25)
#define SCR_ATA (1U << 26)
+#define SCR_FGTEN (1U << 27)
+#define SCR_ECVEN (1U << 28)
+#define SCR_TWEDEN (1U << 29)
+#define SCR_TWEDEL MAKE_64BIT_MASK(30, 4)
+#define SCR_TME (1ULL << 34)
+#define SCR_AMVOFFEN (1ULL << 35)
+#define SCR_ENAS0 (1ULL << 36)
+#define SCR_ADEN (1ULL << 37)
+#define SCR_HXEN (1ULL << 38)
+#define SCR_TRNDR (1ULL << 40)
+#define SCR_ENTP2 (1ULL << 41)
+#define SCR_GPF (1ULL << 48)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

@@ -1934,6 +1960,7 @@ FIELD(ID_MMFR4, CCIDX, 24, 4)
FIELD(ID_MMFR4, EVT, 28, 4)

FIELD(ID_MMFR5, ETS, 0, 4)
+FIELD(ID_MMFR5, NTLBPA, 4, 4)

FIELD(ID_PFR0, STATE0, 0, 4)
FIELD(ID_PFR0, STATE1, 4, 4)

@@ -1986,6 +2013,16 @@ FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
FIELD(ID_AA64ISAR1, BF16, 44, 4)
FIELD(ID_AA64ISAR1, DGH, 48, 4)
FIELD(ID_AA64ISAR1, I8MM, 52, 4)
+FIELD(ID_AA64ISAR1, XS, 56, 4)
+FIELD(ID_AA64ISAR1, LS64, 60, 4)
+
+FIELD(ID_AA64ISAR2, WFXT, 0, 4)
+FIELD(ID_AA64ISAR2, RPRES, 4, 4)
+FIELD(ID_AA64ISAR2, GPA3, 8, 4)
+FIELD(ID_AA64ISAR2, APA3, 12, 4)
+FIELD(ID_AA64ISAR2, MOPS, 16, 4)
+FIELD(ID_AA64ISAR2, BC, 20, 4)
+FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4)

FIELD(ID_AA64PFR0, EL0, 0, 4)
FIELD(ID_AA64PFR0, EL1, 4, 4)

@@ -2008,6 +2045,10 @@ FIELD(ID_AA64PFR1, SSBS, 4, 4)
FIELD(ID_AA64PFR1, MTE, 8, 4)
FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)
FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4)
+FIELD(ID_AA64PFR1, SME, 24, 4)
+FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4)
+FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4)
+FIELD(ID_AA64PFR1, NMI, 36, 4)

FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)

@@ -2034,6 +2075,11 @@ FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
FIELD(ID_AA64MMFR1, XNX, 28, 4)
FIELD(ID_AA64MMFR1, TWED, 32, 4)
FIELD(ID_AA64MMFR1, ETS, 36, 4)
+FIELD(ID_AA64MMFR1, HCX, 40, 4)
+FIELD(ID_AA64MMFR1, AFP, 44, 4)
+FIELD(ID_AA64MMFR1, NTLBPA, 48, 4)
+FIELD(ID_AA64MMFR1, TIDCP1, 52, 4)
+FIELD(ID_AA64MMFR1, CMOW, 56, 4)

FIELD(ID_AA64MMFR2, CNP, 0, 4)
FIELD(ID_AA64MMFR2, UAO, 4, 4)

@@ -2060,7 +2106,10 @@ FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
FIELD(ID_AA64DFR0, PMSVER, 32, 4)
FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
+FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4)
+FIELD(ID_AA64DFR0, MTPMU, 48, 4)
+FIELD(ID_AA64DFR0, BRBE, 52, 4)
FIELD(ID_AA64DFR0, HPMN0, 60, 4)

FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
FIELD(ID_AA64ZFR0, AES, 4, 4)

@@ -2082,6 +2131,7 @@ FIELD(ID_DFR0, PERFMON, 24, 4)
FIELD(ID_DFR0, TRACEFILT, 28, 4)

FIELD(ID_DFR1, MTPMU, 0, 4)
+FIELD(ID_DFR1, HPMN0, 4, 4)

FIELD(DBGDIDR, SE_IMP, 12, 1)
FIELD(DBGDIDR, NSUHD_IMP, 14, 1)

@@ -2757,11 +2807,6 @@ typedef enum CPAccessResult {
    /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
-   /* Access fails and results in an exception syndrome for an FP access,
-    * trapped directly to EL2 or EL3
-    */
-   CP_ACCESS_TRAP_FP_EL2 = 7,
-   CP_ACCESS_TRAP_FP_EL3 = 8,
} CPAccessResult;

/* Access functions for coprocessor registers. These cannot fail and
@@ -952,7 +952,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
    qemu_mutex_unlock_iothread();

    if (!return_to_aa64) {
-       env->aarch64 = 0;
+       env->aarch64 = false;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().

@@ -975,7 +975,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
    } else {
        int tbii;

-       env->aarch64 = 1;
+       env->aarch64 = true;
        spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {

@@ -4784,18 +4784,6 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    }
}

-static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
-                                     bool isread)
-{
-    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
-        return CP_ACCESS_TRAP_FP_EL2;
-    }
-    if (env->cp15.cptr_el[3] & CPTR_TFP) {
-        return CP_ACCESS_TRAP_FP_EL3;
-    }
-    return CP_ACCESS_OK;
-}
-
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{

@@ -5097,9 +5085,8 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
-     .type = ARM_CP_ALIAS,
-     .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
-     .access = PL2_RW, .accessfn = fpexc32_access },
+     .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_FPU,
+     .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,

@@ -10181,7 +10168,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
-   env->aarch64 = 1;
+   env->aarch64 = true;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

@@ -564,7 +564,7 @@ int hvf_arch_init_vcpu(CPUState *cpu)
    hv_return_t ret;
    int i;

-   env->aarch64 = 1;
+   env->aarch64 = true;
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */

@@ -564,7 +564,7 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
-   env->thumb = 1;
+   env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

@@ -590,7 +590,7 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
-       env->thumb = 1;
+       env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

@@ -626,7 +626,7 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
-   env->thumb = 1;
+   env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

@@ -691,19 +691,6 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
-   case CP_ACCESS_TRAP_FP_EL2:
-       target_el = 2;
-       /* Since we are an implementation that takes exceptions on a trapped
-        * conditional insn only if the insn has passed its condition code
-        * check, we take the IMPDEF choice to always report CV=1 COND=0xe
-        * (which is also the required value for AArch64 traps).
-        */
-       syndrome = syn_fp_access_trap(1, 0xe, false);
-       break;
-   case CP_ACCESS_TRAP_FP_EL3:
-       target_el = 3;
-       syndrome = syn_fp_access_trap(1, 0xe, false);
-       break;
    default:
        g_assert_not_reached();
    }

@@ -61,17 +61,14 @@ static inline TCGv_i32 load_cpu_offset(int offset)

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

-static inline void store_cpu_offset(TCGv_i32 var, int offset)
-{
-    tcg_gen_st_i32(var, cpu_env, offset);
-    tcg_temp_free_i32(var);
-}
+void store_cpu_offset(TCGv_i32 var, int offset, int size);

-#define store_cpu_field(var, name) \
-    store_cpu_offset(var, offsetof(CPUARMState, name))
+#define store_cpu_field(var, name) \
+    store_cpu_offset(var, offsetof(CPUARMState, name), \
+                     sizeof_field(CPUARMState, name))

#define store_cpu_field_constant(val, name) \
-    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env, offsetof(CPUARMState, name))
+    store_cpu_field(tcg_constant_i32(val), name)

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
@@ -128,29 +128,28 @@ static int get_a64_user_mem_index(DisasContext *s)
    return arm_to_core_mmu_idx(useridx);
}

-static void reset_btype(DisasContext *s)
+static void set_btype_raw(int val)
{
-    if (s->btype != 0) {
-        TCGv_i32 zero = tcg_const_i32(0);
-        tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
-        tcg_temp_free_i32(zero);
-        s->btype = 0;
-    }
+    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
+                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
-    TCGv_i32 tcg_val;
-
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);
-
-    tcg_val = tcg_const_i32(val);
-    tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
-    tcg_temp_free_i32(tcg_val);
+    set_btype_raw(val);
    s->btype = -1;
}

+static void reset_btype(DisasContext *s)
+{
+    if (s->btype != 0) {
+        set_btype_raw(0);
+        s->btype = 0;
+    }
+}
+
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);

@@ -342,6 +341,11 @@ static void a64_free_cc(DisasCompare64 *c64)
    tcg_temp_free_i64(c64->value);
}

+static void gen_rebuild_hflags(DisasContext *s)
+{
+    gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
+}
+
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

@@ -1668,9 +1672,7 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
-       t1 = tcg_const_i32(s->current_el);
-       gen_helper_rebuild_hflags_a64(cpu_env, t1);
-       tcg_temp_free_i32(t1);
+       gen_rebuild_hflags(s);
        break;

    case 0x04: /* PAN */

@@ -1682,9 +1684,7 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
-       t1 = tcg_const_i32(s->current_el);
-       gen_helper_rebuild_hflags_a64(cpu_env, t1);
-       tcg_temp_free_i32(t1);
+       gen_rebuild_hflags(s);
        break;

    case 0x05: /* SPSel */

@@ -1742,9 +1742,7 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
        } else {
            clear_pstate_bits(PSTATE_TCO);
        }
-       t1 = tcg_const_i32(s->current_el);
-       gen_helper_rebuild_hflags_a64(cpu_env, t1);
-       tcg_temp_free_i32(t1);
+       gen_rebuild_hflags(s);
        /* Many factors, including TCO, go into MTE_ACTIVE. */
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
    } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {

@@ -1991,9 +1989,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
-       TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
-       gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
-       tcg_temp_free_i32(tcg_el);
+       gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
@@ -14664,13 +14660,13 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

-   dc->aarch64 = 1;
+   dc->aarch64 = true;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
-   dc->thumb = 0;
+   dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;

@@ -173,7 +173,7 @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
    }

    /* Zero the Sregs from btmreg to topreg inclusive. */
-   zero = tcg_const_i64(0);
+   zero = tcg_constant_i64(0);
    if (btmreg & 1) {
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;

@@ -187,8 +187,7 @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
    }
    assert(btmreg == topreg + 1);
    if (dc_isar_feature(aa32_mve, s)) {
-       TCGv_i32 z32 = tcg_const_i32(0);
-       store_cpu_field(z32, v7m.vpr);
+       store_cpu_field(tcg_constant_i32(0), v7m.vpr);
    }

    clear_eci_state(s);

@@ -512,7 +511,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
    }
    case ARM_VFP_FPCXT_NS:
    {
-       TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
+       TCGv_i32 control, sfpa, fpscr, fpdscr;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

@@ -552,10 +551,9 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
-       zero = tcg_const_i32(0);
-       tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
+       tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0),
+                           fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
-       tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);

@@ -447,7 +447,7 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
    int mmu_idx = get_mem_index(s);
    int size = a->size;
    TCGv_i64 tmp64;
-   TCGv_i32 addr, tmp;
+   TCGv_i32 addr;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;

@@ -513,7 +513,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)

    tmp64 = tcg_temp_new_i64();
    addr = tcg_temp_new_i32();
-   tmp = tcg_const_i32(1 << size);
    load_reg_var(s, addr, a->rn);

    mop = endian | size | align;

@@ -530,7 +529,7 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
                neon_load_element64(tmp64, tt, n, size);
                gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
            }
-           tcg_gen_add_i32(addr, addr, tmp);
+           tcg_gen_addi_i32(addr, addr, 1 << size);

            /* Subsequent memory operations inherit alignment */
            mop &= ~MO_AMASK;

@@ -538,7 +537,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
        }
    }
    tcg_temp_free_i32(addr);
-   tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(tmp64);

    gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);

@@ -1348,7 +1346,7 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
     * To avoid excessive duplication of ops we implement shift
     * by immediate using the variable shift operations.
     */
-   constimm = tcg_const_i64(dup_const(a->size, a->shift));
+   constimm = tcg_constant_i64(dup_const(a->size, a->shift));

    for (pass = 0; pass < a->q + 1; pass++) {
        TCGv_i64 tmp = tcg_temp_new_i64();

@@ -1358,7 +1356,6 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
        write_neon_element64(tmp, a->vd, pass, MO_64);
        tcg_temp_free_i64(tmp);
    }
-   tcg_temp_free_i64(constimm);
    return true;
}

@@ -1394,7 +1391,7 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
     * To avoid excessive duplication of ops we implement shift
     * by immediate using the variable shift operations.
     */
-   constimm = tcg_const_i32(dup_const(a->size, a->shift));
+   constimm = tcg_constant_i32(dup_const(a->size, a->shift));
    tmp = tcg_temp_new_i32();

    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {

@@ -1403,7 +1400,6 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
        write_neon_element32(tmp, a->vd, pass, MO_32);
    }
    tcg_temp_free_i32(tmp);
-   tcg_temp_free_i32(constimm);
    return true;
}

@@ -1457,7 +1453,7 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
     * This is always a right shift, and the shiftfn is always a
     * left-shift helper, which thus needs the negated shift count.
     */
-   constimm = tcg_const_i64(-a->shift);
+   constimm = tcg_constant_i64(-a->shift);
    rm1 = tcg_temp_new_i64();
    rm2 = tcg_temp_new_i64();
    rd = tcg_temp_new_i32();

@@ -1477,7 +1473,6 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
    tcg_temp_free_i32(rd);
    tcg_temp_free_i64(rm1);
    tcg_temp_free_i64(rm2);
-   tcg_temp_free_i64(constimm);

    return true;
}

@@ -1521,7 +1516,7 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
        /* size == 2 */
        imm = -a->shift;
    }
-   constimm = tcg_const_i32(imm);
+   constimm = tcg_constant_i32(imm);

    /* Load all inputs first to avoid potential overwrite */
    rm1 = tcg_temp_new_i32();

@@ -1546,7 +1541,6 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,

    shiftfn(rm3, rm3, constimm);
    shiftfn(rm4, rm4, constimm);
-   tcg_temp_free_i32(constimm);

    tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
    tcg_temp_free_i32(rm4);

@@ -2911,7 +2905,7 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
        return true;
    }

-   desc = tcg_const_i32((a->vn << 2) | a->len);
+   desc = tcg_constant_i32((a->vn << 2) | a->len);
    def = tcg_temp_new_i64();
    if (a->op) {
        read_neon_element64(def, a->vd, 0, MO_64);

@@ -2926,7 +2920,6 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)

    tcg_temp_free_i64(def);
    tcg_temp_free_i64(val);
-   tcg_temp_free_i32(desc);
    return true;
}

@@ -1916,8 +1916,6 @@ static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a)
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    int64_t ibound;
-   TCGv_i64 bound;
-   TCGCond cond;

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (u) {

@@ -1928,15 +1926,12 @@ static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
    if (d) {
        tcg_gen_sub_i64(reg, reg, val);
        ibound = (u ? 0 : INT32_MIN);
-       cond = TCG_COND_LT;
+       tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound));
    } else {
        tcg_gen_add_i64(reg, reg, val);
        ibound = (u ? UINT32_MAX : INT32_MAX);
-       cond = TCG_COND_GT;
+       tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound));
    }
-   bound = tcg_const_i64(ibound);
-   tcg_gen_movcond_i64(cond, reg, reg, bound, bound, reg);
-   tcg_temp_free_i64(bound);
}

/* Similarly with 64-bit values. */
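
The replacement above turns a compare-plus-movcond into a single smin/smax clamp. The same idea in plain C (a standalone restatement, not QEMU code; the TCG version only needs one bound per direction because the addend is a non-negative element count):

#include <stdint.h>

static int32_t sat_add_s32(int32_t reg, int64_t val)
{
    int64_t r = (int64_t)reg + val; /* 64-bit math, as in the TCG ops */

    if (r > INT32_MAX) {
        r = INT32_MAX;              /* what the tcg_gen_smin_i64() bound does */
    }
    if (r < INT32_MIN) {
        r = INT32_MIN;              /* what the tcg_gen_smax_i64() bound does */
    }
    return (int32_t)r;
}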

@@ -180,8 +180,7 @@ static void gen_update_fp_context(DisasContext *s)
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        if (dc_isar_feature(aa32_mve, s)) {
-           TCGv_i32 z32 = tcg_const_i32(0);
-           store_cpu_field(z32, v7m.vpr);
+           store_cpu_field(tcg_constant_i32(0), v7m.vpr);
        }
        /*
         * We just updated the FPSCR and VPR. Some of this state is cached

@@ -317,7 +316,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

-       zero = tcg_const_i64(0);
+       zero = tcg_constant_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();

@@ -335,27 +334,22 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
        vfp_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
-           tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
-                               frn, frm);
+           tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, frn, frm);
            break;
        case 1: /* vs: V */
-           tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
-                               frn, frm);
+           tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero, frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
-           tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
-                               frn, frm);
+           tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
-           tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
-                               frn, frm);
+           tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
-           tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
-                               dest, frm);
+           tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }

@@ -367,13 +361,11 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);
-
-       tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

-       zero = tcg_const_i32(0);
+       zero = tcg_constant_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();

@@ -382,27 +374,22 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
        vfp_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
-           tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
-                               frn, frm);
+           tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, frn, frm);
            break;
        case 1: /* vs: V */
-           tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
-                               frn, frm);
+           tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero, frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
-           tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
-                               frn, frm);
+           tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
-           tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
-                               frn, frm);
+           tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
-           tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
-                               dest, frm);
+           tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }

@@ -414,8 +401,6 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
-
-       tcg_temp_free_i32(zero);
    }

    return true;

@@ -547,7 +532,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
        fpst = fpstatus_ptr(FPST_FPCR);
    }

-   tcg_shift = tcg_const_i32(0);
+   tcg_shift = tcg_constant_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

@@ -595,8 +580,6 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

-   tcg_temp_free_i32(tcg_shift);
-
    tcg_temp_free_ptr(fpst);

    return true;

@@ -850,15 +833,11 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
    case ARM_VFP_MVFR2:
    case ARM_VFP_FPSID:
        if (s->current_el == 1) {
-           TCGv_i32 tcg_reg, tcg_rt;
-
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
-           tcg_reg = tcg_const_i32(a->reg);
-           tcg_rt = tcg_const_i32(a->rt);
-           gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
-           tcg_temp_free_i32(tcg_reg);
-           tcg_temp_free_i32(tcg_rt);
+           gen_helper_check_hcr_el2_trap(cpu_env,
+                                         tcg_constant_i32(a->rt),
+                                         tcg_constant_i32(a->reg));
        }
        /* fall through */
    case ARM_VFP_FPEXC:

@@ -2388,8 +2367,6 @@ MAKE_VFM_TRANS_FNS(dp)

static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
{
-   TCGv_i32 fd;
-
    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

@@ -2402,9 +2379,7 @@ static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
        return true;
    }

-   fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
-   vfp_store_reg32(fd, a->vd);
-   tcg_temp_free_i32(fd);
+   vfp_store_reg32(tcg_constant_i32(vfp_expand_imm(MO_16, a->imm)), a->vd);
    return true;
}

@@ -2440,7 +2415,7 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
        }
    }

-   fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
+   fd = tcg_constant_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        vfp_store_reg32(fd, vd);

@@ -2454,7 +2429,6 @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
        vd = vfp_advance_sreg(vd, delta_d);
    }

-   tcg_temp_free_i32(fd);
    return true;
}

@@ -2495,7 +2469,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
        }
    }

-   fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
+   fd = tcg_constant_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        vfp_store_reg64(fd, vd);

@@ -2509,7 +2483,6 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
        vd = vfp_advance_dreg(vd, delta_d);
    }

-   tcg_temp_free_i64(fd);
    return true;
}

@@ -3294,7 +3267,7 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR_F16);
-   shift = tcg_const_i32(frac_bits);
+   shift = tcg_constant_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {

@@ -3328,7 +3301,6 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
-   tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

@@ -3353,7 +3325,7 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
-   shift = tcg_const_i32(frac_bits);
+   shift = tcg_constant_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {

@@ -3387,7 +3359,6 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
-   tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

@@ -3418,7 +3389,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
    vfp_load_reg64(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
-   shift = tcg_const_i32(frac_bits);
+   shift = tcg_constant_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {

@@ -3452,7 +3423,6 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)

    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
-   tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

@@ -180,6 +180,25 @@ typedef enum ISSInfo {
    ISSIs16Bit = (1 << 8),
} ISSInfo;

+/*
+ * Store var into env + offset to a member with size bytes.
+ * Free var after use.
+ */
+void store_cpu_offset(TCGv_i32 var, int offset, int size)
+{
+    switch (size) {
+    case 1:
+        tcg_gen_st8_i32(var, cpu_env, offset);
+        break;
+    case 4:
+        tcg_gen_st_i32(var, cpu_env, offset);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    tcg_temp_free_i32(var);
+}
+
/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
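
Why the new size parameter in store_cpu_offset() matters: after the bool conversions elsewhere in this queue, some CPUARMState members are only 1 byte wide, and a blind 4-byte store would trample their neighbours. A standalone demonstration of the hazard that the tcg_gen_st8_i32() case avoids (toy struct, little-endian assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two adjacent 1-byte CPU-state fields, illustrative, not the real struct */
struct env { uint8_t thumb; uint8_t aarch64; uint8_t pad[6]; };

int main(void)
{
    struct env e = { .thumb = 0, .aarch64 = 1 };
    uint32_t one = 1;

    /* A 4-byte store aimed at the 1-byte 'thumb' slot... */
    memcpy(&e.thumb, &one, sizeof(one));
    /* ...sets thumb, but silently zeroes 'aarch64' on little-endian. */
    printf("thumb=%u aarch64=%u\n", e.thumb, e.aarch64);
    return 0;
}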

@@ -332,6 +351,26 @@ void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
    tcg_temp_free_i32(tmp_mask);
}

+static void gen_rebuild_hflags(DisasContext *s, bool new_el)
+{
+    bool m_profile = arm_dc_feature(s, ARM_FEATURE_M);
+
+    if (new_el) {
+        if (m_profile) {
+            gen_helper_rebuild_hflags_m32_newel(cpu_env);
+        } else {
+            gen_helper_rebuild_hflags_a32_newel(cpu_env);
+        }
+    } else {
+        TCGv_i32 tcg_el = tcg_constant_i32(s->current_el);
+        if (m_profile) {
+            gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
+        } else {
+            gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
+        }
+    }
+}
+
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

@@ -513,16 +552,14 @@ static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
#define GEN_SHIFT(name) \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{ \
-    TCGv_i32 tmp1, tmp2, tmp3; \
-    tmp1 = tcg_temp_new_i32(); \
-    tcg_gen_andi_i32(tmp1, t1, 0xff); \
-    tmp2 = tcg_const_i32(0); \
-    tmp3 = tcg_const_i32(0x1f); \
-    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
-    tcg_temp_free_i32(tmp3); \
-    tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
-    tcg_gen_##name##_i32(dest, tmp2, tmp1); \
-    tcg_temp_free_i32(tmp2); \
+    TCGv_i32 tmpd = tcg_temp_new_i32(); \
+    TCGv_i32 tmp1 = tcg_temp_new_i32(); \
+    TCGv_i32 zero = tcg_constant_i32(0); \
+    tcg_gen_andi_i32(tmp1, t1, 0x1f); \
+    tcg_gen_##name##_i32(tmpd, t0, tmp1); \
+    tcg_gen_andi_i32(tmp1, t1, 0xe0); \
+    tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd); \
+    tcg_temp_free_i32(tmpd); \
    tcg_temp_free_i32(tmp1); \
}
GEN_SHIFT(shl)
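
What the rewritten macro computes, in plain C: AArch32 register-specified shifts use the bottom byte of the shift register, and any count of 32 or more yields zero. Testing t1 & 0xe0 is exactly the "count >= 32" check (a reference model, not QEMU code):

#include <stdint.h>

static uint32_t ref_shl(uint32_t t0, uint32_t t1)
{
    if (t1 & 0xe0) {          /* any of bits 7:5 set -> count >= 32 */
        return 0;             /* the movcond's 'zero' arm */
    }
    return t0 << (t1 & 0x1f); /* the in-range shift */
}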

@@ -531,12 +568,10 @@ GEN_SHIFT(shr)

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
-   TCGv_i32 tmp1, tmp2;
-   tmp1 = tcg_temp_new_i32();
+   TCGv_i32 tmp1 = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp1, t1, 0xff);
-   tmp2 = tcg_const_i32(0x1f);
-   tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
-   tcg_temp_free_i32(tmp2);
+   tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31));
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
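
gen_sar() gets the same treatment with a single umin: for arithmetic right shifts, a count of 32..255 must replicate the sign bit, which clamping the count to 31 already achieves (a reference model; it assumes the usual sign-propagating >> on signed int):

#include <stdint.h>

static int32_t ref_sar(int32_t t0, uint32_t t1)
{
    uint32_t sh = t1 & 0xff; /* bottom byte of the shift register */

    if (sh > 31) {
        sh = 31;             /* the tcg_gen_umin_i32(..., 31) clamp */
    }
    return t0 >> sh;         /* sign-propagating shift */
}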
|
||||
|
@ -4852,7 +4887,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
|
|||
tcg_temp_free_i32(tmp);
|
||||
} else {
|
||||
TCGv_i32 tmp = load_reg(s, rt);
|
||||
store_cpu_offset(tmp, ri->fieldoffset);
|
||||
store_cpu_offset(tmp, ri->fieldoffset, 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -4866,17 +4901,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
          * A write to any coprocessor register that ends a TB
          * must rebuild the hflags for the next TB.
          */
-        TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
-        if (arm_dc_feature(s, ARM_FEATURE_M)) {
-            gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
-        } else {
-            if (ri->type & ARM_CP_NEWEL) {
-                gen_helper_rebuild_hflags_a32_newel(cpu_env);
-            } else {
-                gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
-            }
-        }
-        tcg_temp_free_i32(tcg_el);
+        gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL);
         /*
          * We default to ending the TB on a coprocessor register write,
          * but allow this to be suppressed by the register definition
@@ -6426,7 +6451,7 @@ static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
     tcg_temp_free_i32(addr);
     tcg_temp_free_i32(reg);
     /* If we wrote to CONTROL, the EL might have changed */
-    gen_helper_rebuild_hflags_m32_newel(cpu_env);
+    gen_rebuild_hflags(s, true);
     gen_lookup_tb(s);
     return true;
 }
@@ -8878,7 +8903,7 @@ static bool trans_CPS(DisasContext *s, arg_CPS *a)
 
 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
 {
-    TCGv_i32 tmp, addr, el;
+    TCGv_i32 tmp, addr;
 
     if (!arm_dc_feature(s, ARM_FEATURE_M)) {
         return false;
@@ -8901,9 +8926,7 @@ static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
         gen_helper_v7m_msr(cpu_env, addr, tmp);
         tcg_temp_free_i32(addr);
     }
-    el = tcg_const_i32(s->current_el);
-    gen_helper_rebuild_hflags_m32(cpu_env, el);
-    tcg_temp_free_i32(el);
+    gen_rebuild_hflags(s, false);
     tcg_temp_free_i32(tmp);
     gen_lookup_tb(s);
     return true;
@@ -9334,7 +9357,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->isar = &cpu->isar;
     dc->condjmp = 0;
 
-    dc->aarch64 = 0;
+    dc->aarch64 = false;
     /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
      * there is no secure EL1, so we route exceptions to EL3.
      */
@@ -9847,18 +9870,14 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         /* nothing more to generate */
         break;
     case DISAS_WFI:
-    {
-        TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
-                                      !(dc->insn & (1U << 31))) ? 2 : 4);
-
-        gen_helper_wfi(cpu_env, tmp);
-        tcg_temp_free_i32(tmp);
-        /* The helper doesn't necessarily throw an exception, but we
+        gen_helper_wfi(cpu_env,
+                       tcg_constant_i32(dc->base.pc_next - dc->pc_curr));
+        /*
+         * The helper doesn't necessarily throw an exception, but we
          * must go back to the main loop to check for interrupts anyway.
          */
         tcg_gen_exit_tb(NULL, 0);
         break;
-    }
     case DISAS_WFE:
         gen_helper_wfe(cpu_env);
         break;
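
The old code inferred the instruction length from the Thumb flag and the encoding (2 bytes for a 16-bit Thumb insn, else 4); the new code gets the same number directly as dc->base.pc_next - dc->pc_curr, the span of the instruction just translated. The helper plausibly uses this length to rewind the PC and to flag 16-bit encodings in the trap syndrome when WFI traps to a higher exception level. A trivial model (hypothetical field names mirroring the DisasContext ones):

    #include <assert.h>
    #include <stdint.h>

    struct insn_span { uint32_t pc_curr, pc_next; };

    /* End minus start of the just-decoded instruction. */
    static uint32_t insn_len(const struct insn_span *s)
    {
        return s->pc_next - s->pc_curr;   /* 4 for A32; 2 or 4 for Thumb */
    }

    int main(void)
    {
        struct insn_span a32 = { 0x1000, 0x1004 };
        struct insn_span t16 = { 0x2000, 0x2002 };
        assert(insn_len(&a32) == 4);
        assert(insn_len(&t16) == 2);
        return 0;
    }
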

diff --git a/target/arm/translate.h b/target/arm/translate.h
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -30,7 +30,6 @@ typedef struct DisasContext {
     bool eci_handled;
     /* TCG op to rewind to if this turns out to be an invalid ECI state */
     TCGOp *insn_eci_rewind;
-    int thumb;
     int sctlr_b;
     MemOp be_data;
 #if !defined(CONFIG_USER_ONLY)
@@ -59,12 +58,13 @@ typedef struct DisasContext {
      * so that top level loop can generate correct syndrome information.
      */
     uint32_t svc_imm;
-    int aarch64;
     int current_el;
     /* Debug target exception level for single-step exceptions */
     int debug_target_el;
     GHashTable *cp_regs;
     uint64_t features; /* CPU features bits */
+    bool aarch64;
+    bool thumb;
     /* Because unallocated encodings generate different exception syndrome
      * information from traps due to FP being disabled, we can't do a single
      * "is fp access disabled" check at a high level in the decode tree.
@@ -332,16 +332,9 @@ static inline void gen_ss_advance(DisasContext *s)
 static inline void gen_exception(int excp, uint32_t syndrome,
                                  uint32_t target_el)
 {
-    TCGv_i32 tcg_excp = tcg_const_i32(excp);
-    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
-    TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
-    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
-                                       tcg_syn, tcg_el);
-
-    tcg_temp_free_i32(tcg_el);
-    tcg_temp_free_i32(tcg_syn);
-    tcg_temp_free_i32(tcg_excp);
+    gen_helper_exception_with_syndrome(cpu_env, tcg_constant_i32(excp),
+                                       tcg_constant_i32(syndrome),
+                                       tcg_constant_i32(target_el));
 }
 
 /* Generate an architectural singlestep exception */