diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index e39a810a4e..f138e7fefe 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -98,7 +98,7 @@ bool kvm_allowed; bool kvm_readonly_mem_allowed; bool kvm_vm_attributes_allowed; bool kvm_msi_use_devid; -bool kvm_has_guest_debug; +static bool kvm_has_guest_debug; static int kvm_sstep_flags; static bool kvm_immediate_exit; static hwaddr kvm_max_slot_size = ~0; diff --git a/hw/arm/virt.c b/hw/arm/virt.c index be2856c018..040ca2d794 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -1998,13 +1998,14 @@ static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem) if (pmu) { assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU)); if (kvm_irqchip_in_kernel()) { - kvm_arm_pmu_set_irq(cpu, VIRTUAL_PMU_IRQ); + kvm_arm_pmu_set_irq(ARM_CPU(cpu), VIRTUAL_PMU_IRQ); } - kvm_arm_pmu_init(cpu); + kvm_arm_pmu_init(ARM_CPU(cpu)); } if (steal_time) { - kvm_arm_pvtime_init(cpu, pvtime_reg_base + - cpu->cpu_index * PVTIME_SIZE_PER_CPU); + kvm_arm_pvtime_init(ARM_CPU(cpu), pvtime_reg_base + + cpu->cpu_index + * PVTIME_SIZE_PER_CPU); } } } else { diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index f7df602cff..3befc960db 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "hw/intc/arm_gicv3_its_common.h" #include "hw/qdev-properties.h" #include "sysemu/runstate.h" diff --git a/hw/misc/imx7_snvs.c b/hw/misc/imx7_snvs.c index a245f96cd4..8e7f43187f 100644 --- a/hw/misc/imx7_snvs.c +++ b/hw/misc/imx7_snvs.c @@ -13,28 +13,100 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/timer.h" +#include "migration/vmstate.h" #include "hw/misc/imx7_snvs.h" +#include "qemu/cutils.h" #include "qemu/module.h" +#include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "sysemu/runstate.h" #include "trace.h" +#define RTC_FREQ 32768ULL + +static const VMStateDescription vmstate_imx7_snvs = { + .name = TYPE_IMX7_SNVS, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(tick_offset, IMX7SNVSState), + VMSTATE_UINT64(lpcr, IMX7SNVSState), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t imx7_snvs_get_count(IMX7SNVSState *s) +{ + uint64_t ticks = muldiv64(qemu_clock_get_ns(rtc_clock), RTC_FREQ, + NANOSECONDS_PER_SECOND); + return s->tick_offset + ticks; +} + static uint64_t imx7_snvs_read(void *opaque, hwaddr offset, unsigned size) { - trace_imx7_snvs_read(offset, 0); + IMX7SNVSState *s = IMX7_SNVS(opaque); + uint64_t ret = 0; - return 0; + switch (offset) { + case SNVS_LPSRTCMR: + ret = extract64(imx7_snvs_get_count(s), 32, 15); + break; + case SNVS_LPSRTCLR: + ret = extract64(imx7_snvs_get_count(s), 0, 32); + break; + case SNVS_LPCR: + ret = s->lpcr; + break; + } + + trace_imx7_snvs_read(offset, ret, size); + + return ret; +} + +static void imx7_snvs_reset(DeviceState *dev) +{ + IMX7SNVSState *s = IMX7_SNVS(dev); + + s->lpcr = 0; } static void imx7_snvs_write(void *opaque, hwaddr offset, uint64_t v, unsigned size) { - const uint32_t value = v; - const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN; + trace_imx7_snvs_write(offset, v, size); - trace_imx7_snvs_write(offset, value); + IMX7SNVSState *s = IMX7_SNVS(opaque); - if (offset == SNVS_LPCR && ((value & mask) == mask)) { - qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + uint64_t new_value = 0, snvs_count = 0; + + if (offset == SNVS_LPSRTCMR || 
offset == SNVS_LPSRTCLR) { + snvs_count = imx7_snvs_get_count(s); + } + + switch (offset) { + case SNVS_LPSRTCMR: + new_value = deposit64(snvs_count, 32, 32, v); + break; + case SNVS_LPSRTCLR: + new_value = deposit64(snvs_count, 0, 32, v); + break; + case SNVS_LPCR: { + s->lpcr = v; + + const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN; + + if ((v & mask) == mask) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + break; + } + } + + if (offset == SNVS_LPSRTCMR || offset == SNVS_LPSRTCLR) { + s->tick_offset += new_value - snvs_count; } } @@ -59,17 +131,24 @@ static void imx7_snvs_init(Object *obj) { SysBusDevice *sd = SYS_BUS_DEVICE(obj); IMX7SNVSState *s = IMX7_SNVS(obj); + struct tm tm; memory_region_init_io(&s->mmio, obj, &imx7_snvs_ops, s, TYPE_IMX7_SNVS, 0x1000); sysbus_init_mmio(sd, &s->mmio); + + qemu_get_timedate(&tm, 0); + s->tick_offset = mktimegm(&tm) - + qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND; } static void imx7_snvs_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = imx7_snvs_reset; + dc->vmsd = &vmstate_imx7_snvs; dc->desc = "i.MX7 Secure Non-Volatile Storage Module"; } diff --git a/hw/misc/trace-events b/hw/misc/trace-events index 05ff692441..85725506bf 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -116,8 +116,8 @@ imx7_gpr_read(uint64_t offset) "addr 0x%08" PRIx64 imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx64 # imx7_snvs.c -imx7_snvs_read(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32 -imx7_snvs_write(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32 +imx7_snvs_read(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS read: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u" +imx7_snvs_write(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS write: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u" # mos6522.c mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d" diff --git a/include/hw/misc/imx7_snvs.h b/include/hw/misc/imx7_snvs.h index 14a1d6fe6b..1272076086 100644 --- a/include/hw/misc/imx7_snvs.h +++ b/include/hw/misc/imx7_snvs.h @@ -20,7 +20,9 @@ enum IMX7SNVSRegisters { SNVS_LPCR = 0x38, SNVS_LPCR_TOP = BIT(6), - SNVS_LPCR_DP_EN = BIT(5) + SNVS_LPCR_DP_EN = BIT(5), + SNVS_LPSRTCMR = 0x050, /* Secure Real Time Counter MSB Register */ + SNVS_LPSRTCLR = 0x054, /* Secure Real Time Counter LSB Register */ }; #define TYPE_IMX7_SNVS "imx7.snvs" @@ -31,6 +33,9 @@ struct IMX7SNVSState { SysBusDevice parent_obj; MemoryRegion mmio; + + uint64_t tick_offset; + uint64_t lpcr; }; #endif /* IMX7_SNVS_H */ diff --git a/target/arm/cpu.c b/target/arm/cpu.c index efb22a87f9..650e09b29c 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -1686,7 +1686,7 @@ void arm_cpu_post_init(Object *obj) } if (kvm_enabled()) { - kvm_arm_add_vcpu_properties(obj); + kvm_arm_add_vcpu_properties(cpu); } #ifndef CONFIG_USER_ONLY diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 1e9c6c85ae..8e30a7993e 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -66,7 +66,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) */ if (kvm_enabled()) { if (kvm_arm_sve_supported()) { - cpu->sve_vq.supported = kvm_arm_sve_get_vls(CPU(cpu)); + cpu->sve_vq.supported = kvm_arm_sve_get_vls(cpu); vq_supported = cpu->sve_vq.supported; } else { assert(!cpu_isar_feature(aa64_sve, cpu)); diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index cbfba532f5..83d2619080 
100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -1026,14 +1026,6 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_RW, .accessfn = access_tda, .type = ARM_CP_NOP }, - /* - * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor - * to save and restore a 32-bit guest's DBGVCR) - */ - { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, - .access = PL2_RW, .accessfn = access_tda, - .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, /* * Dummy MDCCINT_EL1, since we don't implement the Debug Communications * Channel but Linux may try to access this register. The 32-bit @@ -1062,6 +1054,18 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) }, }; +/* These are present only when EL1 supports AArch32 */ +static const ARMCPRegInfo debug_aa32_el1_reginfo[] = { + /* + * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor + * to save and restore a 32-bit guest's DBGVCR) + */ + { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL2_RW, .accessfn = access_tda, + .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, +}; + static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { /* 64 bit access versions of the (dummy) debug registers */ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, @@ -1207,6 +1211,9 @@ void define_debug_regs(ARMCPU *cpu) assert(ctx_cmps <= brps); define_arm_cp_regs(cpu, debug_cp_reginfo); + if (cpu_isar_feature(aa64_aa32_el1, cpu)) { + define_arm_cp_regs(cpu, debug_aa32_el1_reginfo); + } if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); diff --git a/target/arm/helper.c b/target/arm/helper.c index 2746d3fdac..fc546df5c7 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1475,6 +1475,22 @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, pmu_op_finish(env); } +static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t pmcr = env->cp15.c9_pmcr; + + /* + * If EL2 is implemented and enabled for the current security state, reads + * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN. 
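+ * Reads from EL2 or EL3, or with EL2 disabled, still see the full
+ * implemented counter count in PMCR.N.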
+ */ + if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) { + pmcr &= ~PMCRN_MASK; + pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; + } + + return pmcr; +} + static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -5698,20 +5714,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, - { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, - .access = PL2_RW, - .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, - .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, - { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, - .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, - .writefn = dacr_write, .raw_writefn = raw_write, - .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, - { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, - .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, - .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, @@ -5746,6 +5748,24 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, }; +/* These are present only when EL1 supports AArch32 */ +static const ARMCPRegInfo v8_aa32_el1_reginfo[] = { + { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, + .access = PL2_RW, + .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, + .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, + { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, + .writefn = dacr_write, .raw_writefn = raw_write, + .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, + { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, + .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, +}; + static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) { ARMCPU *cpu = env_archcpu(env); @@ -7154,8 +7174,9 @@ static void define_pmu_regs(ARMCPU *cpu) .fgt = FGT_PMCR_EL0, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), - .accessfn = pmreg_access, .writefn = pmcr_write, - .raw_writefn = raw_write, + .accessfn = pmreg_access, + .readfn = pmcr_read, .raw_readfn = raw_read, + .writefn = pmcr_write, .raw_writefn = raw_write, }; ARMCPRegInfo pmcr64 = { .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, @@ -7165,6 +7186,7 @@ static void define_pmu_regs(ARMCPU *cpu) .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), .resetvalue = cpu->isar.reset_pmcr_el0, + .readfn = pmcr_read, .raw_readfn = raw_read, .writefn = pmcr_write, .raw_writefn = raw_write, }; @@ -7645,6 +7667,7 @@ static const ARMCPRegInfo rndr_reginfo[] = { static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, uint64_t value) { +#ifdef CONFIG_TCG ARMCPU *cpu = env_archcpu(env); /* CTR_EL0 System register -> DminLine, bits [19:16] */ uint64_t dline_size = 4 << 
((cpu->ctr >> 16) & 0xF); @@ -7669,6 +7692,10 @@ static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, } #endif /*CONFIG_USER_ONLY*/ } +#else + /* Handled by hardware accelerator. */ + g_assert_not_reached(); +#endif /* CONFIG_TCG */ } static const ARMCPRegInfo dcpop_reg[] = { @@ -8716,6 +8743,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) } define_arm_cp_regs(cpu, v8_idregs); define_arm_cp_regs(cpu, v8_cp_reginfo); + if (cpu_isar_feature(aa64_aa32_el1, cpu)) { + define_arm_cp_regs(cpu, v8_aa32_el1_reginfo); + } for (i = 4; i < 16; i++) { /* @@ -10135,61 +10165,6 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, } } -/* Sign/zero extend */ -uint32_t HELPER(sxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(int8_t)x; - res |= (uint32_t)(int8_t)(x >> 16) << 16; - return res; -} - -static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) -{ - /* - * Take a division-by-zero exception if necessary; otherwise return - * to get the usual non-trapping division behaviour (result of 0) - */ - if (arm_feature(env, ARM_FEATURE_M) - && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { - raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); - } -} - -uint32_t HELPER(uxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(uint8_t)x; - res |= (uint32_t)(uint8_t)(x >> 16) << 16; - return res; -} - -int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) -{ - if (den == 0) { - handle_possible_div0_trap(env, GETPC()); - return 0; - } - if (num == INT_MIN && den == -1) { - return INT_MIN; - } - return num / den; -} - -uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) -{ - if (den == 0) { - handle_possible_div0_trap(env, GETPC()); - return 0; - } - return num / den; -} - -uint32_t HELPER(rbit)(uint32_t x) -{ - return revbit32(x); -} - #ifdef CONFIG_USER_ONLY static void switch_mode(CPUARMState *env, int mode) diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 7903e2ddde..c5a3183843 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -2,6 +2,8 @@ * ARM implementation of KVM hooks * * Copyright Christoffer Dall 2009-2010 + * Copyright Mian-M. Hamayun 2013, Virtual Open Systems + * Copyright Alex Bennée 2014, Linaro * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
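
[The read_sys_reg32()/read_sys_reg64() helpers this patch moves into kvm.c are thin wrappers around the KVM_GET_ONE_REG vcpu ioctl. A minimal standalone sketch of that access pattern, assuming an already-initialized vcpu fd and eliding error handling (get_one_reg_u64 is an illustrative name, not part of the patch):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read one 64-bit register; id is an ARM64_SYS_REG()-style encoding. */
    static int get_one_reg_u64(int vcpu_fd, uint64_t id, uint64_t *val)
    {
        struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

The 32-bit variant in the patch still transfers 64 bits (the register ID encodes KVM_REG_SIZE_U64) and narrows the result afterwards.]
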
@@ -19,6 +21,7 @@ #include "qom/object.h" #include "qapi/error.h" #include "sysemu/sysemu.h" +#include "sysemu/runstate.h" #include "sysemu/kvm.h" #include "sysemu/kvm_int.h" #include "kvm_arm.h" @@ -28,10 +31,13 @@ #include "hw/pci/pci.h" #include "exec/memattrs.h" #include "exec/address-spaces.h" +#include "exec/gdbstub.h" #include "hw/boards.h" #include "hw/irq.h" #include "qapi/visitor.h" #include "qemu/log.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ghes.h" const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO @@ -41,28 +47,54 @@ static bool cap_has_mp_state; static bool cap_has_inject_serror_esr; static bool cap_has_inject_ext_dabt; +/** + * ARMHostCPUFeatures: information about the host CPU (identified + * by asking the host kernel) + */ +typedef struct ARMHostCPUFeatures { + ARMISARegisters isar; + uint64_t features; + uint32_t target; + const char *dtb_compatible; +} ARMHostCPUFeatures; + static ARMHostCPUFeatures arm_host_cpu_features; -int kvm_arm_vcpu_init(CPUState *cs) +/** + * kvm_arm_vcpu_init: + * @cpu: ARMCPU + * + * Initialize (or reinitialize) the VCPU by invoking the + * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature + * bitmask specified in the CPUState. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_init(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); struct kvm_vcpu_init init; init.target = cpu->kvm_target; memcpy(init.features, cpu->kvm_init_features, sizeof(init.features)); - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_INIT, &init); } -int kvm_arm_vcpu_finalize(CPUState *cs, int feature) +/** + * kvm_arm_vcpu_finalize: + * @cpu: ARMCPU + * @feature: feature to finalize + * + * Finalizes the configuration of the specified VCPU feature by + * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring + * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of + * KVM's API documentation. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature) { - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature); -} - -void kvm_arm_init_serror_injection(CPUState *cs) -{ - cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state, - KVM_CAP_ARM_INJECT_SERROR_ESR); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature); } bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, @@ -167,6 +199,260 @@ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray) } } +static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) +{ + uint64_t ret; + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; + int err; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + err = ioctl(fd, KVM_GET_ONE_REG, &idreg); + if (err < 0) { + return -1; + } + *pret = ret; + return 0; +} + +static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) +{ + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret }; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + return ioctl(fd, KVM_GET_ONE_REG, &idreg); +} + +static bool kvm_arm_pauth_supported(void) +{ + return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && + kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); +} + +static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) +{ + /* Identify the feature bits corresponding to the host CPU, and + * fill out the ARMHostCPUClass fields accordingly. 
To do this + * we have to create a scratch VM, create a single CPU inside it, + * and then query that CPU for the relevant ID registers. + */ + int fdarray[3]; + bool sve_supported; + bool pmu_supported = false; + uint64_t features = 0; + int err; + + /* Old kernels may not know about the PREFERRED_TARGET ioctl: however + * we know these will only support creating one kind of guest CPU, + * which is its preferred CPU type. Fortunately these old kernels + * support only a very limited number of CPUs. + */ + static const uint32_t cpus_to_try[] = { + KVM_ARM_TARGET_AEM_V8, + KVM_ARM_TARGET_FOUNDATION_V8, + KVM_ARM_TARGET_CORTEX_A57, + QEMU_KVM_ARM_TARGET_NONE + }; + /* + * target = -1 informs kvm_arm_create_scratch_host_vcpu() + * to use the preferred target + */ + struct kvm_vcpu_init init = { .target = -1, }; + + /* + * Ask for SVE if supported, so that we can query ID_AA64ZFR0, + * which is otherwise RAZ. + */ + sve_supported = kvm_arm_sve_supported(); + if (sve_supported) { + init.features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + + /* + * Ask for Pointer Authentication if supported, so that we get + * the unsanitized field values for AA64ISAR1_EL1. + */ + if (kvm_arm_pauth_supported()) { + init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + if (kvm_arm_pmu_supported()) { + init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + pmu_supported = true; + } + + if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { + return false; + } + + ahcf->target = init.target; + ahcf->dtb_compatible = "arm,arm-v8"; + + err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, + ARM64_SYS_REG(3, 0, 0, 4, 0)); + if (unlikely(err < 0)) { + /* + * Before v4.15, the kernel only exposed a limited number of system + * registers, not including any of the interesting AArch64 ID regs. + * For the most part we could leave these fields as zero with minimal + * effect, since this does not affect the values seen by the guest. + * + * However, it could cause problems down the line for QEMU, + * so provide a minimal v8.0 default. + * + * ??? Could read MIDR and use knowledge from cpu64.c. + * ??? Could map a page of memory into our temp guest and + * run the tiniest of hand-crafted kernels to extract + * the values seen by the guest. + * ??? Either of these sounds like too much effort just + * to work around running a modern host kernel. 
+ */ + ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ + err = 0; + } else { + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, + ARM64_SYS_REG(3, 0, 0, 4, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, + ARM64_SYS_REG(3, 0, 0, 4, 5)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, + ARM64_SYS_REG(3, 0, 0, 5, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, + ARM64_SYS_REG(3, 0, 0, 5, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, + ARM64_SYS_REG(3, 0, 0, 6, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, + ARM64_SYS_REG(3, 0, 0, 6, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, + ARM64_SYS_REG(3, 0, 0, 6, 2)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, + ARM64_SYS_REG(3, 0, 0, 7, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, + ARM64_SYS_REG(3, 0, 0, 7, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, + ARM64_SYS_REG(3, 0, 0, 7, 2)); + + /* + * Note that if AArch32 support is not present in the host, + * the AArch32 sysregs are present to be read, but will + * return UNKNOWN values. This is neither better nor worse + * than skipping the reads and leaving 0, as we must avoid + * considering the values in every case. + */ + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, + ARM64_SYS_REG(3, 0, 0, 1, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, + ARM64_SYS_REG(3, 0, 0, 1, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + ARM64_SYS_REG(3, 0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + ARM64_SYS_REG(3, 0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + ARM64_SYS_REG(3, 0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + ARM64_SYS_REG(3, 0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + ARM64_SYS_REG(3, 0, 0, 1, 7)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, + ARM64_SYS_REG(3, 0, 0, 2, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, + ARM64_SYS_REG(3, 0, 0, 2, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, + ARM64_SYS_REG(3, 0, 0, 2, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, + ARM64_SYS_REG(3, 0, 0, 2, 3)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, + ARM64_SYS_REG(3, 0, 0, 2, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, + ARM64_SYS_REG(3, 0, 0, 2, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + ARM64_SYS_REG(3, 0, 0, 2, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, + ARM64_SYS_REG(3, 0, 0, 2, 7)); + + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, + ARM64_SYS_REG(3, 0, 0, 3, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, + ARM64_SYS_REG(3, 0, 0, 3, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, + ARM64_SYS_REG(3, 0, 0, 3, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, + ARM64_SYS_REG(3, 0, 0, 3, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, + ARM64_SYS_REG(3, 0, 0, 3, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, + ARM64_SYS_REG(3, 0, 0, 3, 6)); + + /* + * DBGDIDR is a bit complicated because the kernel doesn't + * provide an accessor for it in 64-bit mode, which is what this + * scratch VM is in, and there's no architected "64-bit sysreg + * which reads the same as the 32-bit register" the way there is + * for other ID registers. 
Instead we synthesize a value from the + * AArch64 ID_AA64DFR0, the same way the kernel code in + * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. + * We only do this if the CPU supports AArch32 at EL1. + */ + if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); + int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); + int ctx_cmps = + FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); + int version = 6; /* ARMv8 debug architecture */ + bool has_el3 = + !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); + uint32_t dbgdidr = 0; + + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); + dbgdidr |= (1 << 15); /* RES1 bit */ + ahcf->isar.dbgdidr = dbgdidr; + } + + if (pmu_supported) { + /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, + ARM64_SYS_REG(3, 3, 9, 12, 0)); + } + + if (sve_supported) { + /* + * There is a range of kernels between kernel commit 73433762fcae + * and f81cb2c3ad41 which have a bug where the kernel doesn't + * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has + * enabled SVE support, which resulted in an error rather than RAZ. + * So only read the register if we set KVM_ARM_VCPU_SVE above. + */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, + ARM64_SYS_REG(3, 0, 0, 4, 4)); + } + } + + kvm_arm_destroy_scratch_host_vcpu(fdarray); + + if (err < 0) { + return false; + } + + /* + * We can assume any KVM supporting CPU is at least a v8 + * with VFPv4+Neon; this in turn implies most of the other + * feature bits. + */ + features |= 1ULL << ARM_FEATURE_V8; + features |= 1ULL << ARM_FEATURE_NEON; + features |= 1ULL << ARM_FEATURE_AARCH64; + features |= 1ULL << ARM_FEATURE_PMU; + features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; + + ahcf->features = features; + + return true; +} + void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) { CPUARMState *env = &cpu->env; @@ -210,10 +496,10 @@ static void kvm_steal_time_set(Object *obj, bool value, Error **errp) } /* KVM VCPU properties should be prefixed with "kvm-". 
*/ -void kvm_arm_add_vcpu_properties(Object *obj) +void kvm_arm_add_vcpu_properties(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(obj); CPUARMState *env = &cpu->env; + Object *obj = OBJECT(cpu); if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { cpu->kvm_adjvtime = true; @@ -271,6 +557,10 @@ int kvm_arch_init(MachineState *ms, KVMState *s) cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); + /* Check whether user space can specify guest syndrome value */ + cap_has_inject_serror_esr = + kvm_check_extension(s, KVM_CAP_ARM_INJECT_SERROR_ESR); + if (ms->smp.cpus > 256 && !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) { error_report("Using more than 256 vcpus requires a host kernel " @@ -308,7 +598,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } - kvm_arm_init_debug(s); + max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); + hw_watchpoints = g_array_sized_new(true, true, + sizeof(HWWatchpoint), max_hw_wps); + + max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); + hw_breakpoints = g_array_sized_new(true, true, + sizeof(HWBreakpoint), max_hw_bps); return ret; } @@ -469,11 +765,36 @@ static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx) return &cpu->cpreg_values[res - cpu->cpreg_indexes]; } -/* Initialize the ARMCPU cpreg list according to the kernel's +/** + * kvm_arm_reg_syncs_via_cpreg_list: + * @regidx: KVM register index + * + * Return true if this KVM register should be synchronized via the + * cpreg list of arbitrary system registers, false if it is synchronized + * by hand using code in kvm_arch_get/put_registers(). + */ +static bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx) +{ + switch (regidx & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: + case KVM_REG_ARM64_SVE: + return false; + default: + return true; + } +} + +/** + * kvm_arm_init_cpreg_list: + * @cpu: ARMCPU + * + * Initialize the ARMCPU cpreg list according to the kernel's * definition of what CPU registers it knows about (and throw away * the previous TCG-created cpreg list). + * + * Returns: 0 if success, else < 0 error code */ -int kvm_arm_init_cpreg_list(ARMCPU *cpu) +static int kvm_arm_init_cpreg_list(ARMCPU *cpu) { struct kvm_reg_list rl; struct kvm_reg_list *rlp; @@ -546,6 +867,28 @@ out: return ret; } +/** + * kvm_arm_cpreg_level: + * @regidx: KVM register index + * + * Return the level of this coprocessor/system register. Return value is + * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE. + */ +static int kvm_arm_cpreg_level(uint64_t regidx) +{ + /* + * All system registers are assumed to be level KVM_PUT_RUNTIME_STATE. + * If a register should be written less often, you must add it here + * with a state of either KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE. + */ + switch (regidx) { + case KVM_REG_ARM_TIMER_CNT: + case KVM_REG_ARM_PTIMER_CNT: + return KVM_PUT_FULL_STATE; + } + return KVM_PUT_RUNTIME_STATE; +} + bool write_kvmstate_to_list(ARMCPU *cpu) { CPUState *cs = CPU(cpu); @@ -638,7 +981,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* Re-init VCPU so that all registers are set to * their respective reset values. 
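* KVM's API documents calling KVM_ARM_VCPU_INIT again as the
* supported way to do this.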
*/ - ret = kvm_arm_vcpu_init(CPU(cpu)); + ret = kvm_arm_vcpu_init(cpu); if (ret < 0) { fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret)); abort(); @@ -660,54 +1003,50 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* * Update KVM's MP_STATE based on what QEMU thinks it is */ -int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state = { .mp_state = (cpu->power_state == PSCI_OFF) ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE }; - int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); - if (ret) { - fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - return -1; - } + return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); } - return 0; } /* * Sync the KVM MP_STATE into QEMU */ -int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state; int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state); if (ret) { - fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - abort(); + return ret; } cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ? PSCI_OFF : PSCI_ON; } - return 0; } -void kvm_arm_get_virtual_time(CPUState *cs) +/** + * kvm_arm_get_virtual_time: + * @cpu: ARMCPU + * + * Gets the VCPU's virtual counter and stores it in the KVM CPU state. + */ +static void kvm_arm_get_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); int ret; if (cpu->kvm_vtime_dirty) { return; } - ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); + ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to get KVM_REG_ARM_TIMER_CNT"); abort(); @@ -716,16 +1055,21 @@ void kvm_arm_get_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = true; } -void kvm_arm_put_virtual_time(CPUState *cs) +/** + * kvm_arm_put_virtual_time: + * @cpu: ARMCPU + * + * Sets the VCPU's virtual counter to the value stored in the KVM CPU state. + */ +static void kvm_arm_put_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); int ret; if (!cpu->kvm_vtime_dirty) { return; } - ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); + ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to set KVM_REG_ARM_TIMER_CNT"); abort(); @@ -734,7 +1078,15 @@ void kvm_arm_put_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = false; } -int kvm_put_vcpu_events(ARMCPU *cpu) +/** + * kvm_put_vcpu_events: + * @cpu: ARMCPU + * + * Put VCPU related state to kvm. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_put_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -763,7 +1115,15 @@ int kvm_put_vcpu_events(ARMCPU *cpu) return ret; } -int kvm_get_vcpu_events(ARMCPU *cpu) +/** + * kvm_get_vcpu_events: + * @cpu: ARMCPU + * + * Get VCPU related state from kvm. 
+ * + * Returns: 0 if success else < 0 error code + */ +static int kvm_get_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -787,6 +1147,63 @@ int kvm_get_vcpu_events(ARMCPU *cpu) return 0; } +#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) +#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) + +/* + * ESR_EL1 + * ISS encoding + * AARCH64: DFSC, bits [5:0] + * AARCH32: + * TTBCR.EAE == 0 + * FS[4] - DFSR[10] + * FS[3:0] - DFSR[3:0] + * TTBCR.EAE == 1 + * FS, bits [5:0] + */ +#define ESR_DFSC(aarch64, lpae, v) \ + ((aarch64 || (lpae)) ? ((v) & 0x3F) \ + : (((v) >> 6) | ((v) & 0x1F))) + +#define ESR_DFSC_EXTABT(aarch64, lpae) \ + ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8) + +/** + * kvm_arm_verify_ext_dabt_pending: + * @cpu: ARMCPU + * + * Verify the fault status code wrt the Ext DABT injection + * + * Returns: true if the fault status code is as expected, false otherwise + */ +static bool kvm_arm_verify_ext_dabt_pending(ARMCPU *cpu) +{ + CPUState *cs = CPU(cpu); + uint64_t dfsr_val; + + if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { + CPUARMState *env = &cpu->env; + int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); + int lpae = 0; + + if (!aarch64_mode) { + uint64_t ttbcr; + + if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { + lpae = arm_feature(env, ARM_FEATURE_LPAE) + && (ttbcr & TTBCR_EAE); + } + } + /* + * The verification here is based on the DFSC bits + * of the ESR_EL1 reg only + */ + return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == + ESR_DFSC_EXTABT(aarch64_mode, lpae)); + } + return false; +} + void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { ARMCPU *cpu = ARM_CPU(cs); @@ -801,7 +1218,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) * an IMPLEMENTATION DEFINED exception (for 32-bit EL1) */ if (!arm_feature(env, ARM_FEATURE_AARCH64) && - unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) { + unlikely(!kvm_arm_verify_ext_dabt_pending(cpu))) { error_report("Data abort exception with no valid ISS generated by " "guest memory access. 
KVM unable to emulate faulting " @@ -868,35 +1285,33 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) return MEMTXATTRS_UNSPECIFIED; } -void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) +static void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) { - CPUState *cs = opaque; - ARMCPU *cpu = ARM_CPU(cs); + ARMCPU *cpu = opaque; if (running) { if (cpu->kvm_adjvtime) { - kvm_arm_put_virtual_time(cs); + kvm_arm_put_virtual_time(cpu); } } else { if (cpu->kvm_adjvtime) { - kvm_arm_get_virtual_time(cs); + kvm_arm_get_virtual_time(cpu); } } } /** * kvm_arm_handle_dabt_nisv: - * @cs: CPUState + * @cpu: ARMCPU * @esr_iss: ISS encoding (limited) for the exception from Data Abort * ISV bit set to '0b0' -> no valid instruction syndrome * @fault_ipa: faulting address for the synchronous data abort * * Returns: 0 if the exception has been handled, < 0 otherwise */ -static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, +static int kvm_arm_handle_dabt_nisv(ARMCPU *cpu, uint64_t esr_iss, uint64_t fault_ipa) { - ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; /* * Request KVM to inject the external data abort into the guest @@ -912,7 +1327,7 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, */ events.exception.ext_dabt_pending = 1; /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */ - if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) { + if (!kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events)) { env->ext_dabt_raised = 1; return 0; } @@ -925,19 +1340,97 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, return -1; } +/** + * kvm_arm_handle_debug: + * @cpu: ARMCPU + * @debug_exit: debug part of the KVM exit structure + * + * Returns: TRUE if the debug exception was handled. + * + * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register + * + * To minimise translating between kernel and user-space the kernel + * ABI just provides user-space with the full exception syndrome + * register value to be decoded in QEMU. + */ +static bool kvm_arm_handle_debug(ARMCPU *cpu, + struct kvm_debug_exit_arch *debug_exit) +{ + int hsr_ec = syn_get_ec(debug_exit->hsr); + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + + /* Ensure PC is synchronised */ + kvm_cpu_synchronize_state(cs); + + switch (hsr_ec) { + case EC_SOFTWARESTEP: + if (cs->singlestep_enabled) { + return true; + } else { + /* + * The kernel should have suppressed the guest's ability to + * single step at this point so something has gone wrong. + */ + error_report("%s: guest single-step while debugging unsupported" + " (%"PRIx64", %"PRIx32")", + __func__, env->pc, debug_exit->hsr); + return false; + } + break; + case EC_AA64_BKPT: + if (kvm_find_sw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_BREAKPOINT: + if (find_hw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_WATCHPOINT: + { + CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); + if (wp) { + cs->watchpoint_hit = wp; + return true; + } + break; + } + default: + error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", + __func__, debug_exit->hsr, env->pc); + } + + /* If we are not handling the debug exception it must belong to + * the guest. Let's re-use the existing TCG interrupt code to set + * everything up properly. 
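+ * (The raw syndrome and FAR are forwarded unchanged, targeting EL1.)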
+ */ + cs->exception_index = EXCP_BKPT; + env->exception.syndrome = debug_exit->hsr; + env->exception.vaddress = debug_exit->far; + env->exception.target_el = 1; + qemu_mutex_lock_iothread(); + arm_cpu_do_interrupt(cs); + qemu_mutex_unlock_iothread(); + + return false; +} + int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { + ARMCPU *cpu = ARM_CPU(cs); int ret = 0; switch (run->exit_reason) { case KVM_EXIT_DEBUG: - if (kvm_arm_handle_debug(cs, &run->debug.arch)) { + if (kvm_arm_handle_debug(cpu, &run->debug.arch)) { ret = EXCP_DEBUG; } /* otherwise return to guest */ break; case KVM_EXIT_ARM_NISV: /* External DABT with no valid iss to decode */ - ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss, + ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss, run->arm_nisv.fault_ipa); break; default: @@ -958,12 +1451,47 @@ int kvm_arch_process_async_events(CPUState *cs) return 0; } +/** + * kvm_arm_hw_debug_active: + * @cpu: ARMCPU + * + * Return: TRUE if any hardware breakpoints in use. + */ +static bool kvm_arm_hw_debug_active(ARMCPU *cpu) +{ + return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); +} + +/** + * kvm_arm_copy_hw_debug_data: + * @ptr: kvm_guest_debug_arch structure + * + * Copy the architecture specific debug registers into the + * kvm_guest_debug ioctl structure. + */ +static void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr) +{ + int i; + memset(ptr, 0, sizeof(struct kvm_guest_debug_arch)); + + for (i = 0; i < max_hw_wps; i++) { + HWWatchpoint *wp = get_hw_wp(i); + ptr->dbg_wcr[i] = wp->wcr; + ptr->dbg_wvr[i] = wp->wvr; + } + for (i = 0; i < max_hw_bps; i++) { + HWBreakpoint *bp = get_hw_bp(i); + ptr->dbg_bcr[i] = bp->bcr; + ptr->dbg_bvr[i] = bp->bvr; + } +} + void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) { if (kvm_sw_breakpoints_active(cs)) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; } - if (kvm_arm_hw_debug_active(cs)) { + if (kvm_arm_hw_debug_active(ARM_CPU(cs))) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW; kvm_arm_copy_hw_debug_data(&dbg->arch); } @@ -1117,3 +1645,782 @@ void kvm_arch_accel_class_init(ObjectClass *oc) object_class_property_set_description(oc, "eager-split-size", "Eager Page Split chunk size for hugepages. 
(default: 0, disabled)"); } + +int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return insert_hw_breakpoint(addr); + break; + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return insert_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return delete_hw_breakpoint(addr); + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return delete_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +void kvm_arch_remove_all_hw_breakpoints(void) +{ + if (cur_hw_wps > 0) { + g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); + } + if (cur_hw_bps > 0) { + g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); + } +} + +static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr, + const char *name) +{ + int err; + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + return true; +} + +void kvm_arm_pmu_init(ARMCPU *cpu) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_INIT, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to init PMU"); + abort(); + } +} + +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .addr = (intptr_t)&irq, + .attr = KVM_ARM_VCPU_PMU_V3_IRQ, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to set irq for PMU"); + abort(); + } +} + +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PVTIME_CTRL, + .attr = KVM_ARM_VCPU_PVTIME_IPA, + .addr = (uint64_t)&ipa, + }; + + if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) { + error_report("failed to init PVTIME IPA"); + abort(); + } +} + +void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) +{ + bool has_steal_time = kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); + + if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { + if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + cpu->kvm_steal_time = ON_OFF_AUTO_OFF; + } else { + cpu->kvm_steal_time = ON_OFF_AUTO_ON; + } + } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { + if (!has_steal_time) { + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "on this host"); + return; + } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + /* + * DEN0057A chapter 2 says "This specification only covers + * systems in which the Execution state of the hypervisor + * as well as EL1 of virtual machines is AArch64.". And, + * to ensure that, the smc/hvc calls are only specified as + * smc64/hvc64. 
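+ * An AArch32 guest therefore cannot issue these calls at all.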
+ */ + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "for AArch32 guests"); + return; + } + } +} + +bool kvm_arm_aarch32_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); +} + +bool kvm_arm_sve_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); +} + +QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); + +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) +{ + /* Only call this function if kvm_arm_sve_supported() returns true. */ + static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; + static bool probed; + uint32_t vq = 0; + int i; + + /* + * KVM ensures all host CPUs support the same set of vector lengths. + * So we only need to create the scratch VCPUs once and then cache + * the results. + */ + if (!probed) { + struct kvm_vcpu_init init = { + .target = -1, + .features[0] = (1 << KVM_ARM_VCPU_SVE), + }; + struct kvm_one_reg reg = { + .id = KVM_REG_ARM64_SVE_VLS, + .addr = (uint64_t)&vls[0], + }; + int fdarray[3], ret; + + probed = true; + + if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { + error_report("failed to create scratch VCPU with SVE enabled"); + abort(); + } + ret = ioctl(fdarray[2], KVM_GET_ONE_REG, ®); + kvm_arm_destroy_scratch_host_vcpu(fdarray); + if (ret) { + error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s", + strerror(errno)); + abort(); + } + + for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { + if (vls[i]) { + vq = 64 - clz64(vls[i]) + i * 64; + break; + } + } + if (vq > ARM_MAX_VQ) { + warn_report("KVM supports vector lengths larger than " + "QEMU can enable"); + vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); + } + } + + return vls[0]; +} + +static int kvm_arm_sve_set_vls(ARMCPU *cpu) +{ + uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; + + assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); + + return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]); +} + +#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 + +int kvm_arch_init_vcpu(CPUState *cs) +{ + int ret; + uint64_t mpidr; + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t psciver; + + if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || + !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { + error_report("KVM is not supported for this guest CPU type"); + return -EINVAL; + } + + qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cpu); + + /* Determine init features for this CPU */ + memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); + if (cs->start_powered_off) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; + } + if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { + cpu->psci_version = QEMU_PSCI_VERSION_0_2; + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; + } + if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; + } + if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) { + cpu->has_pmu = false; + } + if (cpu->has_pmu) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + } else { + env->features &= ~(1ULL << ARM_FEATURE_PMU); + } + if (cpu_isar_feature(aa64_sve, cpu)) { + assert(kvm_arm_sve_supported()); + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + /* Do KVM_ARM_VCPU_INIT ioctl */ + ret = kvm_arm_vcpu_init(cpu); + if (ret) { + return ret; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arm_sve_set_vls(cpu); + 
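+ /* KVM requires the KVM_REG_ARM64_SVE_VLS write above to happen
+ * before KVM_ARM_VCPU_FINALIZE, which freezes the SVE configuration. */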
if (ret) { + return ret; + } + ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE); + if (ret) { + return ret; + } + } + + /* + * KVM reports the exact PSCI version it is implementing via a + * special sysreg. If it is present, use its contents to determine + * what to report to the guest in the dtb (it is the PSCI version, + * in the same 15-bits major 16-bits minor format that PSCI_VERSION + * returns). + */ + if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { + cpu->psci_version = psciver; + } + + /* + * When KVM is in use, PSCI is emulated in-kernel and not by qemu. + * Currently KVM has its own idea about MPIDR assignment, so we + * override our defaults with what we get from KVM. + */ + ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); + if (ret) { + return ret; + } + cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; + + return kvm_arm_init_cpreg_list(cpu); +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +/* Callers must hold the iothread mutex lock */ +static void kvm_inject_arm_sea(CPUState *c) +{ + ARMCPU *cpu = ARM_CPU(c); + CPUARMState *env = &cpu->env; + uint32_t esr; + bool same_el; + + c->exception_index = EXCP_DATA_ABORT; + env->exception.target_el = 1; + + /* + * Set the DFSC to synchronous external abort and set FnV to not valid, + * this will tell guest the FAR_ELx is UNKNOWN for this abort. + */ + same_el = arm_current_el(env) == env->exception.target_el; + esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10); + + env->exception.syndrome = esr; + + arm_cpu_do_interrupt(c); +} + +#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +static int kvm_arch_put_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); +#if HOST_BIG_ENDIAN + uint64_t fp_val[2] = { q[1], q[0] }; + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), + fp_val); +#else + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); +#endif + if (ret) { + return ret; + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. 
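+ * A nonzero slice index would only be needed for vector lengths beyond
+ * 2048 bits, which SVE does not currently define.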
+ */ +static int kvm_arch_put_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t tmp[ARM_MAX_VQ * 2]; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + } + + r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + + return 0; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + uint64_t val; + uint32_t fpr; + int i, ret; + unsigned int el; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + /* If we are in AArch32 mode then we need to copy the AArch32 regs to the + * AArch64 registers before pushing them out to 64-bit KVM. + */ + if (!is_a64(env)) { + aarch64_sync_32_to_64(env); + } + + for (i = 0; i < 31; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_save_sp(env, 1); + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */ + if (is_a64(env)) { + val = pstate_read(env); + } else { + val = cpsr_read(env); + } + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Saved Program State Registers + * + * Before we restore from the banked_spsr[] array we need to + * ensure that any modifications to env->spsr are correctly + * reflected in the banks. 
+ */ + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->banked_spsr[i] = env->spsr; + } + + /* KVM 0-4 map to QEMU banks 1-5 */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_put_sve(cs); + } else { + ret = kvm_arch_put_fpsimd(cs); + } + if (ret) { + return ret; + } + + fpr = vfp_get_fpsr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + + fpr = vfp_get_fpcr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + + write_cpustate_to_list(cpu, true); + + if (!write_list_to_kvmstate(cpu, level)) { + return -EINVAL; + } + + /* + * Setting VCPU events should be triggered after syncing the registers + * to avoid overwriting potential changes made by KVM upon calling + * KVM_SET_VCPU_EVENTS ioctl + */ + ret = kvm_put_vcpu_events(cpu); + if (ret) { + return ret; + } + + return kvm_arm_sync_mpstate_to_kvm(cpu); +} + +static int kvm_arch_get_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); + if (ret) { + return ret; + } else { +#if HOST_BIG_ENDIAN + uint64_t t; + t = q[0], q[0] = q[1], q[1] = t; +#endif + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. + */ +static int kvm_arch_get_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = &env->vfp.zregs[n].d[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, cpu->sve_max_vq * 2); + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = &env->vfp.pregs[n].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + } + + r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + + return 0; +} + +int kvm_arch_get_registers(CPUState *cs) +{ + uint64_t val; + unsigned int el; + uint32_t fpr; + int i, ret; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + for (i = 0; i < 31; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + env->aarch64 = ((val & PSTATE_nRW) == 0); + if (is_a64(env)) { + pstate_write(env, val); + } else { + cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. 
On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_restore_sp(env, 1); + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + /* If we are in AArch32 mode then we need to sync the AArch32 regs with the + * incoming AArch64 regs received from 64-bit KVM. + * We must perform this after all of the registers have been acquired from + * the kernel. + */ + if (!is_a64(env)) { + aarch64_sync_64_to_32(env); + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Fetch the SPSR registers + * + * KVM SPSRs 0-4 map to QEMU banks 1-5 + */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->spsr = env->banked_spsr[i]; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_get_sve(cs); + } else { + ret = kvm_arch_get_fpsimd(cs); + } + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpsr(env, fpr); + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpcr(env, fpr); + + ret = kvm_get_vcpu_events(cpu); + if (ret) { + return ret; + } + + if (!write_kvmstate_to_list(cpu)) { + return -EINVAL; + } + /* Note that it's OK to have registers which aren't in CPUState, + * so we can ignore a failure return here. + */ + write_list_to_cpustate(cpu); + + ret = kvm_arm_sync_mpstate_to_qemu(cpu); + + /* TODO: other registers */ + return ret; +} + +void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) +{ + ram_addr_t ram_addr; + hwaddr paddr; + + assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); + + if (acpi_ghes_present() && addr) { + ram_addr = qemu_ram_addr_from_host(addr); + if (ram_addr != RAM_ADDR_INVALID && + kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { + kvm_hwpoison_page_add(ram_addr); + /* + * If this is a BUS_MCEERR_AR, we know we have been called + * synchronously from the vCPU thread, so we can easily + * synchronize the state and inject an error. + * + * TODO: we currently don't tell the guest at all about + * BUS_MCEERR_AO. In that case we might either be being + * called synchronously from the vCPU thread, or a bit + * later from the main thread, so doing the injection of + * the error would be more complicated. 
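+ * (Either way the page has already been added to the hwpoison list
+ * above, so it will be remapped as fresh memory on a VM reset.)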
+ */ + if (code == BUS_MCEERR_AR) { + kvm_cpu_synchronize_state(c); + if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { + kvm_inject_arm_sea(c); + } else { + error_report("failed to record the error"); + abort(); + } + } + return; + } + if (code == BUS_MCEERR_AO) { + error_report("Hardware memory error at addr %p for memory used by " + "QEMU itself instead of guest system!", addr); + } + } + + if (code == BUS_MCEERR_AR) { + error_report("Hardware memory error!"); + exit(1); + } +} + +/* C6.6.29 BRK instruction */ +static const uint32_t brk_insn = 0xd4200000; + +int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { + return -EINVAL; + } + return 0; +} + +int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + static uint32_t brk; + + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || + brk != brk_insn || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { + return -EINVAL; + } + return 0; +} diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c deleted file mode 100644 index 3c175c93a7..0000000000 --- a/target/arm/kvm64.c +++ /dev/null @@ -1,1290 +0,0 @@ -/* - * ARM implementation of KVM hooks, 64 bit specific code - * - * Copyright Mian-M. Hamayun 2013, Virtual Open Systems - * Copyright Alex Bennée 2014, Linaro - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include <sys/ioctl.h> -#include <sys/ptrace.h> - -#include <linux/elf.h> -#include <linux/kvm.h> - -#include "qapi/error.h" -#include "cpu.h" -#include "qemu/timer.h" -#include "qemu/error-report.h" -#include "qemu/host-utils.h" -#include "qemu/main-loop.h" -#include "exec/gdbstub.h" -#include "sysemu/runstate.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" -#include "kvm_arm.h" -#include "internals.h" -#include "cpu-features.h" -#include "hw/acpi/acpi.h" -#include "hw/acpi/ghes.h" - -static bool have_guest_debug; - -void kvm_arm_init_debug(KVMState *s) -{ - have_guest_debug = kvm_check_extension(s, - KVM_CAP_SET_GUEST_DEBUG); - - max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); - hw_watchpoints = g_array_sized_new(true, true, - sizeof(HWWatchpoint), max_hw_wps); - - max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); - hw_breakpoints = g_array_sized_new(true, true, - sizeof(HWBreakpoint), max_hw_bps); - return; -} - -int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) -{ - switch (type) { - case GDB_BREAKPOINT_HW: - return insert_hw_breakpoint(addr); - break; - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_ACCESS: - return insert_hw_watchpoint(addr, len, type); - default: - return -ENOSYS; - } -} - -int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) -{ - switch (type) { - case GDB_BREAKPOINT_HW: - return delete_hw_breakpoint(addr); - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_ACCESS: - return delete_hw_watchpoint(addr, len, type); - default: - return -ENOSYS; - } -} - - -void kvm_arch_remove_all_hw_breakpoints(void) -{ - if (cur_hw_wps > 0) { - g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); - } - if (cur_hw_bps > 0) { - g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); - } -} - -void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr) -{ - int i; - memset(ptr, 0, sizeof(struct
kvm_guest_debug_arch)); - - for (i = 0; i < max_hw_wps; i++) { - HWWatchpoint *wp = get_hw_wp(i); - ptr->dbg_wcr[i] = wp->wcr; - ptr->dbg_wvr[i] = wp->wvr; - } - for (i = 0; i < max_hw_bps; i++) { - HWBreakpoint *bp = get_hw_bp(i); - ptr->dbg_bcr[i] = bp->bcr; - ptr->dbg_bvr[i] = bp->bvr; - } -} - -bool kvm_arm_hw_debug_active(CPUState *cs) -{ - return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); -} - -static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr, - const char *name) -{ - int err; - - err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr); - if (err != 0) { - error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); - return false; - } - - err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr); - if (err != 0) { - error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); - return false; - } - - return true; -} - -void kvm_arm_pmu_init(CPUState *cs) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .attr = KVM_ARM_VCPU_PMU_V3_INIT, - }; - - if (!ARM_CPU(cs)->has_pmu) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { - error_report("failed to init PMU"); - abort(); - } -} - -void kvm_arm_pmu_set_irq(CPUState *cs, int irq) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .addr = (intptr_t)&irq, - .attr = KVM_ARM_VCPU_PMU_V3_IRQ, - }; - - if (!ARM_CPU(cs)->has_pmu) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { - error_report("failed to set irq for PMU"); - abort(); - } -} - -void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PVTIME_CTRL, - .attr = KVM_ARM_VCPU_PVTIME_IPA, - .addr = (uint64_t)&ipa, - }; - - if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) { - error_report("failed to init PVTIME IPA"); - abort(); - } -} - -static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) -{ - uint64_t ret; - struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; - int err; - - assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); - err = ioctl(fd, KVM_GET_ONE_REG, &idreg); - if (err < 0) { - return -1; - } - *pret = ret; - return 0; -} - -static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) -{ - struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret }; - - assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); - return ioctl(fd, KVM_GET_ONE_REG, &idreg); -} - -static bool kvm_arm_pauth_supported(void) -{ - return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && - kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); -} - -bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) -{ - /* Identify the feature bits corresponding to the host CPU, and - * fill out the ARMHostCPUClass fields accordingly. To do this - * we have to create a scratch VM, create a single CPU inside it, - * and then query that CPU for the relevant ID registers. - */ - int fdarray[3]; - bool sve_supported; - bool pmu_supported = false; - uint64_t features = 0; - int err; - - /* Old kernels may not know about the PREFERRED_TARGET ioctl: however - * we know these will only support creating one kind of guest CPU, - * which is its preferred CPU type. Fortunately these old kernels - * support only a very limited number of CPUs. 
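[Aside: the fallback over the candidate target list that follows can be sketched directly against the raw KVM ioctls. This is illustrative only, not part of the patch: scratch_pick_target() is a hypothetical helper, and the real logic lives in kvm_arm_create_scratch_host_vcpu().]

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical sketch: pick a usable target for a scratch vCPU.
 * Prefer the kernel's KVM_ARM_PREFERRED_TARGET; on old kernels that
 * lack it, try each candidate until KVM_ARM_VCPU_INIT accepts one.
 * The candidate list is terminated by a negative sentinel here
 * (QEMU uses QEMU_KVM_ARM_TARGET_NONE instead). */
static int scratch_pick_target(int vmfd, int cpufd,
                               const int *cpus_to_try,
                               struct kvm_vcpu_init *init)
{
    if (ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init) >= 0) {
        return ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
    }
    for (; *cpus_to_try >= 0; cpus_to_try++) {
        init->target = *cpus_to_try;
        if (ioctl(cpufd, KVM_ARM_VCPU_INIT, init) >= 0) {
            return 0;
        }
    }
    return -1;
}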
- */ - static const uint32_t cpus_to_try[] = { - KVM_ARM_TARGET_AEM_V8, - KVM_ARM_TARGET_FOUNDATION_V8, - KVM_ARM_TARGET_CORTEX_A57, - QEMU_KVM_ARM_TARGET_NONE - }; - /* - * target = -1 informs kvm_arm_create_scratch_host_vcpu() - * to use the preferred target - */ - struct kvm_vcpu_init init = { .target = -1, }; - - /* - * Ask for SVE if supported, so that we can query ID_AA64ZFR0, - * which is otherwise RAZ. - */ - sve_supported = kvm_arm_sve_supported(); - if (sve_supported) { - init.features[0] |= 1 << KVM_ARM_VCPU_SVE; - } - - /* - * Ask for Pointer Authentication if supported, so that we get - * the unsanitized field values for AA64ISAR1_EL1. - */ - if (kvm_arm_pauth_supported()) { - init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | - 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); - } - - if (kvm_arm_pmu_supported()) { - init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; - pmu_supported = true; - } - - if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { - return false; - } - - ahcf->target = init.target; - ahcf->dtb_compatible = "arm,arm-v8"; - - err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, - ARM64_SYS_REG(3, 0, 0, 4, 0)); - if (unlikely(err < 0)) { - /* - * Before v4.15, the kernel only exposed a limited number of system - * registers, not including any of the interesting AArch64 ID regs. - * For the most part we could leave these fields as zero with minimal - * effect, since this does not affect the values seen by the guest. - * - * However, it could cause problems down the line for QEMU, - * so provide a minimal v8.0 default. - * - * ??? Could read MIDR and use knowledge from cpu64.c. - * ??? Could map a page of memory into our temp guest and - * run the tiniest of hand-crafted kernels to extract - * the values seen by the guest. - * ??? Either of these sounds like too much effort just - * to work around running a modern host kernel. - */ - ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ - err = 0; - } else { - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, - ARM64_SYS_REG(3, 0, 0, 4, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, - ARM64_SYS_REG(3, 0, 0, 4, 5)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, - ARM64_SYS_REG(3, 0, 0, 5, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, - ARM64_SYS_REG(3, 0, 0, 5, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, - ARM64_SYS_REG(3, 0, 0, 6, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, - ARM64_SYS_REG(3, 0, 0, 6, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, - ARM64_SYS_REG(3, 0, 0, 6, 2)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, - ARM64_SYS_REG(3, 0, 0, 7, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, - ARM64_SYS_REG(3, 0, 0, 7, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, - ARM64_SYS_REG(3, 0, 0, 7, 2)); - - /* - * Note that if AArch32 support is not present in the host, - * the AArch32 sysregs are present to be read, but will - * return UNKNOWN values. This is neither better nor worse - * than skipping the reads and leaving 0, as we must avoid - * considering the values in every case. 
- */ - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, - ARM64_SYS_REG(3, 0, 0, 1, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, - ARM64_SYS_REG(3, 0, 0, 1, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, - ARM64_SYS_REG(3, 0, 0, 1, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, - ARM64_SYS_REG(3, 0, 0, 1, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, - ARM64_SYS_REG(3, 0, 0, 1, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, - ARM64_SYS_REG(3, 0, 0, 1, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, - ARM64_SYS_REG(3, 0, 0, 1, 7)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, - ARM64_SYS_REG(3, 0, 0, 2, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, - ARM64_SYS_REG(3, 0, 0, 2, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, - ARM64_SYS_REG(3, 0, 0, 2, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, - ARM64_SYS_REG(3, 0, 0, 2, 3)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, - ARM64_SYS_REG(3, 0, 0, 2, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, - ARM64_SYS_REG(3, 0, 0, 2, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, - ARM64_SYS_REG(3, 0, 0, 2, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, - ARM64_SYS_REG(3, 0, 0, 2, 7)); - - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, - ARM64_SYS_REG(3, 0, 0, 3, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, - ARM64_SYS_REG(3, 0, 0, 3, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, - ARM64_SYS_REG(3, 0, 0, 3, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, - ARM64_SYS_REG(3, 0, 0, 3, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, - ARM64_SYS_REG(3, 0, 0, 3, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, - ARM64_SYS_REG(3, 0, 0, 3, 6)); - - /* - * DBGDIDR is a bit complicated because the kernel doesn't - * provide an accessor for it in 64-bit mode, which is what this - * scratch VM is in, and there's no architected "64-bit sysreg - * which reads the same as the 32-bit register" the way there is - * for other ID registers. Instead we synthesize a value from the - * AArch64 ID_AA64DFR0, the same way the kernel code in - * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. - * We only do this if the CPU supports AArch32 at EL1. 
- */ - if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { - int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); - int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); - int ctx_cmps = - FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); - int version = 6; /* ARMv8 debug architecture */ - bool has_el3 = - !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); - uint32_t dbgdidr = 0; - - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); - dbgdidr |= (1 << 15); /* RES1 bit */ - ahcf->isar.dbgdidr = dbgdidr; - } - - if (pmu_supported) { - /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, - ARM64_SYS_REG(3, 3, 9, 12, 0)); - } - - if (sve_supported) { - /* - * There is a range of kernels between kernel commit 73433762fcae - * and f81cb2c3ad41 which have a bug where the kernel doesn't - * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has - * enabled SVE support, which resulted in an error rather than RAZ. - * So only read the register if we set KVM_ARM_VCPU_SVE above. - */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, - ARM64_SYS_REG(3, 0, 0, 4, 4)); - } - } - - kvm_arm_destroy_scratch_host_vcpu(fdarray); - - if (err < 0) { - return false; - } - - /* - * We can assume any KVM supporting CPU is at least a v8 - * with VFPv4+Neon; this in turn implies most of the other - * feature bits. - */ - features |= 1ULL << ARM_FEATURE_V8; - features |= 1ULL << ARM_FEATURE_NEON; - features |= 1ULL << ARM_FEATURE_AARCH64; - features |= 1ULL << ARM_FEATURE_PMU; - features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; - - ahcf->features = features; - - return true; -} - -void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) -{ - bool has_steal_time = kvm_arm_steal_time_supported(); - - if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { - if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - cpu->kvm_steal_time = ON_OFF_AUTO_OFF; - } else { - cpu->kvm_steal_time = ON_OFF_AUTO_ON; - } - } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { - if (!has_steal_time) { - error_setg(errp, "'kvm-steal-time' cannot be enabled " - "on this host"); - return; - } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - /* - * DEN0057A chapter 2 says "This specification only covers - * systems in which the Execution state of the hypervisor - * as well as EL1 of virtual machines is AArch64.". And, - * to ensure that, the smc/hvc calls are only specified as - * smc64/hvc64. - */ - error_setg(errp, "'kvm-steal-time' cannot be enabled " - "for AArch32 guests"); - return; - } - } -} - -bool kvm_arm_aarch32_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); -} - -bool kvm_arm_sve_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); -} - -bool kvm_arm_steal_time_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); -} - -QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); - -uint32_t kvm_arm_sve_get_vls(CPUState *cs) -{ - /* Only call this function if kvm_arm_sve_supported() returns true. 
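[Aside: a standalone illustration of how the KVM_REG_ARM64_SVE_VLS bitmap probed below decodes into a maximum vq. max_vq_from_vls() is a hypothetical name, and the GCC/Clang builtin stands in for QEMU's clz64().]

#include <stdint.h>

/* Bit (vq - 1) of the map, counted across 64-bit words, means the
 * vector length of vq quadwords is supported. Scan from the top word
 * down and convert the highest set bit back into a vq value. */
static uint32_t max_vq_from_vls(const uint64_t *vls, int words)
{
    for (int i = words - 1; i >= 0; --i) {
        if (vls[i]) {
            return 64 - __builtin_clzll(vls[i]) + i * 64;
        }
    }
    return 0;
}

/* Example: vls[0] == 0xb (binary 1011) advertises vq 1, 2 and 4, i.e.
 * 128-, 256- and 512-bit vectors; max_vq_from_vls() returns 4. */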
*/ - static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; - static bool probed; - uint32_t vq = 0; - int i; - - /* - * KVM ensures all host CPUs support the same set of vector lengths. - * So we only need to create the scratch VCPUs once and then cache - * the results. - */ - if (!probed) { - struct kvm_vcpu_init init = { - .target = -1, - .features[0] = (1 << KVM_ARM_VCPU_SVE), - }; - struct kvm_one_reg reg = { - .id = KVM_REG_ARM64_SVE_VLS, - .addr = (uint64_t)&vls[0], - }; - int fdarray[3], ret; - - probed = true; - - if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { - error_report("failed to create scratch VCPU with SVE enabled"); - abort(); - } - ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg); - kvm_arm_destroy_scratch_host_vcpu(fdarray); - if (ret) { - error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s", - strerror(errno)); - abort(); - } - - for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { - if (vls[i]) { - vq = 64 - clz64(vls[i]) + i * 64; - break; - } - } - if (vq > ARM_MAX_VQ) { - warn_report("KVM supports vector lengths larger than " - "QEMU can enable"); - vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); - } - } - - return vls[0]; -} - -static int kvm_arm_sve_set_vls(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; - - assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); - - return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]); -} - -#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 - -int kvm_arch_init_vcpu(CPUState *cs) -{ - int ret; - uint64_t mpidr; - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t psciver; - - if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || - !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { - error_report("KVM is not supported for this guest CPU type"); - return -EINVAL; - } - - qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs); - - /* Determine init features for this CPU */ - memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); - if (cs->start_powered_off) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; - } - if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { - cpu->psci_version = QEMU_PSCI_VERSION_0_2; - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; - } - if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; - } - if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) { - cpu->has_pmu = false; - } - if (cpu->has_pmu) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; - } else { - env->features &= ~(1ULL << ARM_FEATURE_PMU); - } - if (cpu_isar_feature(aa64_sve, cpu)) { - assert(kvm_arm_sve_supported()); - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; - } - if (cpu_isar_feature(aa64_pauth, cpu)) { - cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | - 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); - } - - /* Do KVM_ARM_VCPU_INIT ioctl */ - ret = kvm_arm_vcpu_init(cs); - if (ret) { - return ret; - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arm_sve_set_vls(cs); - if (ret) { - return ret; - } - ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE); - if (ret) { - return ret; - } - } - - /* - * KVM reports the exact PSCI version it is implementing via a - * special sysreg. If it is present, use its contents to determine - * what to report to the guest in the dtb (it is the PSCI version, - * in the same 15-bits major 16-bits minor format that PSCI_VERSION - * returns).
- */ - if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { - cpu->psci_version = psciver; - } - - /* - * When KVM is in use, PSCI is emulated in-kernel and not by qemu. - * Currently KVM has its own idea about MPIDR assignment, so we - * override our defaults with what we get from KVM. - */ - ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); - if (ret) { - return ret; - } - cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; - - /* Check whether user space can specify guest syndrome value */ - kvm_arm_init_serror_injection(cs); - - return kvm_arm_init_cpreg_list(cpu); -} - -int kvm_arch_destroy_vcpu(CPUState *cs) -{ - return 0; -} - -bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx) -{ - /* Return true if the regidx is a register we should synchronize - * via the cpreg_tuples array (ie is not a core or sve reg that - * we sync by hand in kvm_arch_get/put_registers()) - */ - switch (regidx & KVM_REG_ARM_COPROC_MASK) { - case KVM_REG_ARM_CORE: - case KVM_REG_ARM64_SVE: - return false; - default: - return true; - } -} - -typedef struct CPRegStateLevel { - uint64_t regidx; - int level; -} CPRegStateLevel; - -/* All system registers not listed in the following table are assumed to be - * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less - * often, you must add it to this table with a state of either - * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE. - */ -static const CPRegStateLevel non_runtime_cpregs[] = { - { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE }, - { KVM_REG_ARM_PTIMER_CNT, KVM_PUT_FULL_STATE }, -}; - -int kvm_arm_cpreg_level(uint64_t regidx) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) { - const CPRegStateLevel *l = &non_runtime_cpregs[i]; - if (l->regidx == regidx) { - return l->level; - } - } - - return KVM_PUT_RUNTIME_STATE; -} - -/* Callers must hold the iothread mutex lock */ -static void kvm_inject_arm_sea(CPUState *c) -{ - ARMCPU *cpu = ARM_CPU(c); - CPUARMState *env = &cpu->env; - uint32_t esr; - bool same_el; - - c->exception_index = EXCP_DATA_ABORT; - env->exception.target_el = 1; - - /* - * Set the DFSC to synchronous external abort and set FnV to not valid, - * this will tell guest the FAR_ELx is UNKNOWN for this abort. - */ - same_el = arm_current_el(env) == env->exception.target_el; - esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10); - - env->exception.syndrome = esr; - - arm_cpu_do_interrupt(c); -} - -#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -static int kvm_arch_put_fpsimd(CPUState *cs) -{ - CPUARMState *env = &ARM_CPU(cs)->env; - int i, ret; - - for (i = 0; i < 32; i++) { - uint64_t *q = aa64_vfp_qreg(env, i); -#if HOST_BIG_ENDIAN - uint64_t fp_val[2] = { q[1], q[0] }; - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), - fp_val); -#else - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); -#endif - if (ret) { - return ret; - } - } - - return 0; -} - -/* - * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits - * and PREGS and the FFR have a slice size of 256 bits. However we simply hard - * code the slice index to zero for now as it's unlikely we'll need more than - * one slice for quite some time. 
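[Aside: the helper sve_bswap64() used throughout these routines is not part of this hunk; a minimal functional sketch of its assumed behaviour follows. Unlike this always-copying version, QEMU's real helper can simply return the source pointer on little-endian hosts.]

#include <stdint.h>

/* Sketch: byte-swap nr 64-bit words on big-endian hosts so the data
 * matches the little-endian word layout KVM exchanges; plain copy
 * otherwise. HOST_BIG_ENDIAN is assumed to be QEMU's host-endianness
 * macro, and __builtin_bswap64 stands in for QEMU's bswap64(). */
static uint64_t *sve_bswap64_sketch(uint64_t *dst, const uint64_t *src, int nr)
{
    for (int i = 0; i < nr; i++) {
#if HOST_BIG_ENDIAN
        dst[i] = __builtin_bswap64(src[i]);
#else
        dst[i] = src[i];
#endif
    }
    return dst;
}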
- */ -static int kvm_arch_put_sve(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t tmp[ARM_MAX_VQ * 2]; - uint64_t *r; - int n, ret; - - for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { - r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); - if (ret) { - return ret; - } - } - - for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { - r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], - DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); - if (ret) { - return ret; - } - } - - r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], - DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); - if (ret) { - return ret; - } - - return 0; -} - -int kvm_arch_put_registers(CPUState *cs, int level) -{ - uint64_t val; - uint32_t fpr; - int i, ret; - unsigned int el; - - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - /* If we are in AArch32 mode then we need to copy the AArch32 regs to the - * AArch64 registers before pushing them out to 64-bit KVM. - */ - if (!is_a64(env)) { - aarch64_sync_32_to_64(env); - } - - for (i = 0; i < 31; i++) { - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), - &env->xregs[i]); - if (ret) { - return ret; - } - } - - /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the - * QEMU side we keep the current SP in xregs[31] as well. - */ - aarch64_save_sp(env, 1); - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); - if (ret) { - return ret; - } - - /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */ - if (is_a64(env)) { - val = pstate_read(env); - } else { - val = cpsr_read(env); - } - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); - if (ret) { - return ret; - } - - /* Saved Program State Registers - * - * Before we restore from the banked_spsr[] array we need to - * ensure that any modifications to env->spsr are correctly - * reflected in the banks. 
- */ - el = arm_current_el(env); - if (el > 0 && !is_a64(env)) { - i = bank_number(env->uncached_cpsr & CPSR_M); - env->banked_spsr[i] = env->spsr; - } - - /* KVM 0-4 map to QEMU banks 1-5 */ - for (i = 0; i < KVM_NR_SPSR; i++) { - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), - &env->banked_spsr[i + 1]); - if (ret) { - return ret; - } - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arch_put_sve(cs); - } else { - ret = kvm_arch_put_fpsimd(cs); - } - if (ret) { - return ret; - } - - fpr = vfp_get_fpsr(env); - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); - if (ret) { - return ret; - } - - fpr = vfp_get_fpcr(env); - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); - if (ret) { - return ret; - } - - write_cpustate_to_list(cpu, true); - - if (!write_list_to_kvmstate(cpu, level)) { - return -EINVAL; - } - - /* - * Setting VCPU events should be triggered after syncing the registers - * to avoid overwriting potential changes made by KVM upon calling - * KVM_SET_VCPU_EVENTS ioctl - */ - ret = kvm_put_vcpu_events(cpu); - if (ret) { - return ret; - } - - kvm_arm_sync_mpstate_to_kvm(cpu); - - return ret; -} - -static int kvm_arch_get_fpsimd(CPUState *cs) -{ - CPUARMState *env = &ARM_CPU(cs)->env; - int i, ret; - - for (i = 0; i < 32; i++) { - uint64_t *q = aa64_vfp_qreg(env, i); - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); - if (ret) { - return ret; - } else { -#if HOST_BIG_ENDIAN - uint64_t t; - t = q[0], q[0] = q[1], q[1] = t; -#endif - } - } - - return 0; -} - -/* - * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits - * and PREGS and the FFR have a slice size of 256 bits. However we simply hard - * code the slice index to zero for now as it's unlikely we'll need more than - * one slice for quite some time. - */ -static int kvm_arch_get_sve(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t *r; - int n, ret; - - for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { - r = &env->vfp.zregs[n].d[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, cpu->sve_max_vq * 2); - } - - for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { - r = &env->vfp.pregs[n].p[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - } - - r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - - return 0; -} - -int kvm_arch_get_registers(CPUState *cs) -{ - uint64_t val; - unsigned int el; - uint32_t fpr; - int i, ret; - - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - for (i = 0; i < 31; i++) { - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), - &env->xregs[i]); - if (ret) { - return ret; - } - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); - if (ret) { - return ret; - } - - env->aarch64 = ((val & PSTATE_nRW) == 0); - if (is_a64(env)) { - pstate_write(env, val); - } else { - cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); - } - - /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. 
On the - * QEMU side we keep the current SP in xregs[31] as well. - */ - aarch64_restore_sp(env, 1); - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); - if (ret) { - return ret; - } - - /* If we are in AArch32 mode then we need to sync the AArch32 regs with the - * incoming AArch64 regs received from 64-bit KVM. - * We must perform this after all of the registers have been acquired from - * the kernel. - */ - if (!is_a64(env)) { - aarch64_sync_64_to_32(env); - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); - if (ret) { - return ret; - } - - /* Fetch the SPSR registers - * - * KVM SPSRs 0-4 map to QEMU banks 1-5 - */ - for (i = 0; i < KVM_NR_SPSR; i++) { - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), - &env->banked_spsr[i + 1]); - if (ret) { - return ret; - } - } - - el = arm_current_el(env); - if (el > 0 && !is_a64(env)) { - i = bank_number(env->uncached_cpsr & CPSR_M); - env->spsr = env->banked_spsr[i]; - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arch_get_sve(cs); - } else { - ret = kvm_arch_get_fpsimd(cs); - } - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); - if (ret) { - return ret; - } - vfp_set_fpsr(env, fpr); - - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); - if (ret) { - return ret; - } - vfp_set_fpcr(env, fpr); - - ret = kvm_get_vcpu_events(cpu); - if (ret) { - return ret; - } - - if (!write_kvmstate_to_list(cpu)) { - return -EINVAL; - } - /* Note that it's OK to have registers which aren't in CPUState, - * so we can ignore a failure return here. - */ - write_list_to_cpustate(cpu); - - kvm_arm_sync_mpstate_to_qemu(cpu); - - /* TODO: other registers */ - return ret; -} - -void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) -{ - ram_addr_t ram_addr; - hwaddr paddr; - - assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); - - if (acpi_ghes_present() && addr) { - ram_addr = qemu_ram_addr_from_host(addr); - if (ram_addr != RAM_ADDR_INVALID && - kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { - kvm_hwpoison_page_add(ram_addr); - /* - * If this is a BUS_MCEERR_AR, we know we have been called - * synchronously from the vCPU thread, so we can easily - * synchronize the state and inject an error. - * - * TODO: we currently don't tell the guest at all about - * BUS_MCEERR_AO. In that case we might either be being - * called synchronously from the vCPU thread, or a bit - * later from the main thread, so doing the injection of - * the error would be more complicated. 
- */ - if (code == BUS_MCEERR_AR) { - kvm_cpu_synchronize_state(c); - if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { - kvm_inject_arm_sea(c); - } else { - error_report("failed to record the error"); - abort(); - } - } - return; - } - if (code == BUS_MCEERR_AO) { - error_report("Hardware memory error at addr %p for memory used by " - "QEMU itself instead of guest system!", addr); - } - } - - if (code == BUS_MCEERR_AR) { - error_report("Hardware memory error!"); - exit(1); - } -} - -/* C6.6.29 BRK instruction */ -static const uint32_t brk_insn = 0xd4200000; - -int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) -{ - if (have_guest_debug) { - if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || - cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { - return -EINVAL; - } - return 0; - } else { - error_report("guest debug not supported on this kernel"); - return -EINVAL; - } -} - -int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) -{ - static uint32_t brk; - - if (have_guest_debug) { - if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || - brk != brk_insn || - cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { - return -EINVAL; - } - return 0; - } else { - error_report("guest debug not supported on this kernel"); - return -EINVAL; - } -} - -/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register - * - * To minimise translating between kernel and user-space the kernel - * ABI just provides user-space with the full exception syndrome - * register value to be decoded in QEMU. - */ - -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) -{ - int hsr_ec = syn_get_ec(debug_exit->hsr); - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - /* Ensure PC is synchronised */ - kvm_cpu_synchronize_state(cs); - - switch (hsr_ec) { - case EC_SOFTWARESTEP: - if (cs->singlestep_enabled) { - return true; - } else { - /* - * The kernel should have suppressed the guest's ability to - * single step at this point so something has gone wrong. - */ - error_report("%s: guest single-step while debugging unsupported" - " (%"PRIx64", %"PRIx32")", - __func__, env->pc, debug_exit->hsr); - return false; - } - break; - case EC_AA64_BKPT: - if (kvm_find_sw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_BREAKPOINT: - if (find_hw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_WATCHPOINT: - { - CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); - if (wp) { - cs->watchpoint_hit = wp; - return true; - } - break; - } - default: - error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", - __func__, debug_exit->hsr, env->pc); - } - - /* If we are not handling the debug exception it must belong to - * the guest. Let's re-use the existing TCG interrupt code to set - * everything up properly. - */ - cs->exception_index = EXCP_BKPT; - env->exception.syndrome = debug_exit->hsr; - env->exception.vaddress = debug_exit->far; - env->exception.target_el = 1; - qemu_mutex_lock_iothread(); - arm_cpu_do_interrupt(cs); - qemu_mutex_unlock_iothread(); - - return false; -} - -#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) -#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) - -/* - * ESR_EL1 - * ISS encoding - * AARCH64: DFSC, bits [5:0] - * AARCH32: - * TTBCR.EAE == 0 - * FS[4] - DFSR[10] - * FS[3:0] - DFSR[3:0] - * TTBCR.EAE == 1 - * FS, bits [5:0] - */ -#define ESR_DFSC(aarch64, lpae, v) \ - ((aarch64 || (lpae)) ? 
((v) & 0x3F) \ - : (((v) >> 6) | ((v) & 0x1F))) - -#define ESR_DFSC_EXTABT(aarch64, lpae) \ - ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8) - -bool kvm_arm_verify_ext_dabt_pending(CPUState *cs) -{ - uint64_t dfsr_val; - - if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); - int lpae = 0; - - if (!aarch64_mode) { - uint64_t ttbcr; - - if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { - lpae = arm_feature(env, ARM_FEATURE_LPAE) - && (ttbcr & TTBCR_EAE); - } - } - /* - * The verification here is based on the DFSC bits - * of the ESR_EL1 reg only - */ - return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == - ESR_DFSC_EXTABT(aarch64_mode, lpae)); - } - return false; -} diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 051a0da41c..cfaa0d9bc7 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -12,46 +12,10 @@ #define QEMU_KVM_ARM_H #include "sysemu/kvm.h" -#include "exec/memory.h" -#include "qemu/error-report.h" #define KVM_ARM_VGIC_V2 (1 << 0) #define KVM_ARM_VGIC_V3 (1 << 1) -/** - * kvm_arm_init_debug() - initialize guest debug capabilities - * @s: KVMState - * - * Should be called only once before using guest debug capabilities. - */ -void kvm_arm_init_debug(KVMState *s); - -/** - * kvm_arm_vcpu_init: - * @cs: CPUState - * - * Initialize (or reinitialize) the VCPU by invoking the - * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature - * bitmask specified in the CPUState. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_arm_vcpu_init(CPUState *cs); - -/** - * kvm_arm_vcpu_finalize: - * @cs: CPUState - * @feature: feature to finalize - * - * Finalizes the configuration of the specified VCPU feature by - * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring - * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of - * KVM's API documentation. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_arm_vcpu_finalize(CPUState *cs, int feature); - /** * kvm_arm_register_device: * @mr: memory region for this device @@ -73,37 +37,6 @@ int kvm_arm_vcpu_finalize(CPUState *cs, int feature); void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group, uint64_t attr, int dev_fd, uint64_t addr_ormask); -/** - * kvm_arm_init_cpreg_list: - * @cpu: ARMCPU - * - * Initialize the ARMCPU cpreg list according to the kernel's - * definition of what CPU registers it knows about (and throw away - * the previous TCG-created cpreg list). - * - * Returns: 0 if success, else < 0 error code - */ -int kvm_arm_init_cpreg_list(ARMCPU *cpu); - -/** - * kvm_arm_reg_syncs_via_cpreg_list: - * @regidx: KVM register index - * - * Return true if this KVM register should be synchronized via the - * cpreg list of arbitrary system registers, false if it is synchronized - * by hand using code in kvm_arch_get/put_registers(). - */ -bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx); - -/** - * kvm_arm_cpreg_level: - * @regidx: KVM register index - * - * Return the level of this coprocessor/system register. Return value is - * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE. - */ -int kvm_arm_cpreg_level(uint64_t regidx); - /** * write_list_to_kvmstate: * @cpu: ARMCPU @@ -163,34 +96,6 @@ void kvm_arm_cpu_post_load(ARMCPU *cpu); */ void kvm_arm_reset_vcpu(ARMCPU *cpu); -/** - * kvm_arm_init_serror_injection: - * @cs: CPUState - * - * Check whether KVM can set guest SError syndrome. 
- */ -void kvm_arm_init_serror_injection(CPUState *cs); - -/** - * kvm_get_vcpu_events: - * @cpu: ARMCPU - * - * Get VCPU related state from kvm. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_get_vcpu_events(ARMCPU *cpu); - -/** - * kvm_put_vcpu_events: - * @cpu: ARMCPU - * - * Put VCPU related state to kvm. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_put_vcpu_events(ARMCPU *cpu); - #ifdef CONFIG_KVM /** * kvm_arm_create_scratch_host_vcpu: @@ -222,37 +127,15 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, */ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray); -/** - * ARMHostCPUFeatures: information about the host CPU (identified - * by asking the host kernel) - */ -typedef struct ARMHostCPUFeatures { - ARMISARegisters isar; - uint64_t features; - uint32_t target; - const char *dtb_compatible; -} ARMHostCPUFeatures; - -/** - * kvm_arm_get_host_cpu_features: - * @ahcf: ARMHostCPUClass to fill in - * - * Probe the capabilities of the host kernel's preferred CPU and fill - * in the ARMHostCPUClass struct accordingly. - * - * Returns true on success and false otherwise. - */ -bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf); - /** * kvm_arm_sve_get_vls: - * @cs: CPUState + * @cpu: ARMCPU * * Get all the SVE vector lengths supported by the KVM host, setting * the bits corresponding to their length in quadwords minus one * (vq - 1) up to ARM_MAX_VQ. Return the resulting map. */ -uint32_t kvm_arm_sve_get_vls(CPUState *cs); +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu); /** * kvm_arm_set_cpu_features_from_host: @@ -265,12 +148,12 @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu); /** * kvm_arm_add_vcpu_properties: - * @obj: The CPU object to add the properties to + * @cpu: The CPU object to add the properties to * * Add all KVM specific CPU properties to the CPU object. These * are the CPU properties with "kvm-" prefixed names. */ -void kvm_arm_add_vcpu_properties(Object *obj); +void kvm_arm_add_vcpu_properties(ARMCPU *cpu); /** * kvm_arm_steal_time_finalize: @@ -282,14 +165,6 @@ void kvm_arm_add_vcpu_properties(Object *obj); */ void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp); -/** - * kvm_arm_steal_time_supported: - * - * Returns: true if KVM can enable steal time reporting - * and false otherwise. - */ -bool kvm_arm_steal_time_supported(void); - /** * kvm_arm_aarch32_supported: * @@ -323,57 +198,19 @@ bool kvm_arm_sve_supported(void); */ int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa); -/** - * kvm_arm_sync_mpstate_to_kvm: - * @cpu: ARMCPU - * - * If supported set the KVM MP_STATE based on QEMU's model. - * - * Returns 0 on success and -1 on failure. - */ -int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu); - -/** - * kvm_arm_sync_mpstate_to_qemu: - * @cpu: ARMCPU - * - * If supported get the MP_STATE from KVM and store in QEMU's model. - * - * Returns 0 on success and aborts on failure. - */ -int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu); - -/** - * kvm_arm_get_virtual_time: - * @cs: CPUState - * - * Gets the VCPU's virtual counter and stores it in the KVM CPU state. - */ -void kvm_arm_get_virtual_time(CPUState *cs); - -/** - * kvm_arm_put_virtual_time: - * @cs: CPUState - * - * Sets the VCPU's virtual counter to the value stored in the KVM CPU state. 
- */ -void kvm_arm_put_virtual_time(CPUState *cs); - -void kvm_arm_vm_state_change(void *opaque, bool running, RunState state); - int kvm_arm_vgic_probe(void); -void kvm_arm_pmu_set_irq(CPUState *cs, int irq); -void kvm_arm_pmu_init(CPUState *cs); +void kvm_arm_pmu_init(ARMCPU *cpu); +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq); /** * kvm_arm_pvtime_init: - * @cs: CPUState + * @cpu: ARMCPU * @ipa: Per-vcpu guest physical base address of the pvtime structures * * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa. */ -void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa); +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa); int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); @@ -398,11 +235,6 @@ static inline bool kvm_arm_sve_supported(void) return false; } -static inline bool kvm_arm_steal_time_supported(void) -{ - return false; -} - /* * These functions should never actually be called without KVM support. */ @@ -411,7 +243,7 @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) g_assert_not_reached(); } -static inline void kvm_arm_add_vcpu_properties(Object *obj) +static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu) { g_assert_not_reached(); } @@ -426,17 +258,17 @@ static inline int kvm_arm_vgic_probe(void) g_assert_not_reached(); } -static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) +static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) { g_assert_not_reached(); } -static inline void kvm_arm_pmu_init(CPUState *cs) +static inline void kvm_arm_pmu_init(ARMCPU *cpu) { g_assert_not_reached(); } -static inline void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) +static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) { g_assert_not_reached(); } @@ -446,48 +278,11 @@ static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) g_assert_not_reached(); } -static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs) +static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) { g_assert_not_reached(); } #endif -/** - * kvm_arm_handle_debug: - * @cs: CPUState - * @debug_exit: debug part of the KVM exit structure - * - * Returns: TRUE if the debug exception was handled. - */ -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit); - -/** - * kvm_arm_hw_debug_active: - * @cs: CPU State - * - * Return: TRUE if any hardware breakpoints in use. - */ -bool kvm_arm_hw_debug_active(CPUState *cs); - -/** - * kvm_arm_copy_hw_debug_data: - * @ptr: kvm_guest_debug_arch structure - * - * Copy the architecture specific debug registers into the - * kvm_guest_debug ioctl structure. 
- */ -struct kvm_guest_debug_arch; -void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr); - -/** - * kvm_arm_verify_ext_dabt_pending: - * @cs: CPUState - * - * Verify the fault status code wrt the Ext DABT injection - * - * Returns: true if the fault status code is as expected, false otherwise - */ -bool kvm_arm_verify_ext_dabt_pending(CPUState *cs); - #endif diff --git a/target/arm/meson.build b/target/arm/meson.build index 5d04a8e94f..d6c3902e67 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -8,7 +8,7 @@ arm_ss.add(files( )) arm_ss.add(zlib) -arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) +arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c')) arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c')) arm_ss.add(when: 'TARGET_AARCH64', if_true: files( diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index ea08936a85..9de0fa2d1f 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -121,6 +121,61 @@ void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue) } } +/* Sign/zero extend */ +uint32_t HELPER(sxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(int8_t)x; + res |= (uint32_t)(int8_t)(x >> 16) << 16; + return res; +} + +static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) +{ + /* + * Take a division-by-zero exception if necessary; otherwise return + * to get the usual non-trapping division behaviour (result of 0) + */ + if (arm_feature(env, ARM_FEATURE_M) + && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { + raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); + } +} + +uint32_t HELPER(uxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(uint8_t)x; + res |= (uint32_t)(uint8_t)(x >> 16) << 16; + return res; +} + +int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) +{ + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } + if (num == INT_MIN && den == -1) { + return INT_MIN; + } + return num / den; +} + +uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) +{ + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } + return num / den; +} + +uint32_t HELPER(rbit)(uint32_t x) +{ + return revbit32(x); +} + uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index a2e49c39f9..f3b5b9124d 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "exec/exec-all.h" #include "translate.h" #include "translate-a64.h" #include "qemu/log.h"