mirror of https://github.com/xemu-project/xemu.git
ppc-7.0 queue
* target/ppc: SPR registration cleanups (Fabiano)
* ppc: nested KVM HV for spapr virtual hypervisor (Nicholas)
* spapr: nvdimm: Introduce spapr-nvdimm device (Shivaprasad)

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEoPZlSPBIlev+awtgUaNDx8/77KEFAmIPUeEACgkQUaNDx8/7
7KF5Ng/+JRWCXI9p6cJXlNuZD7BLIqBHF4yhwDQD7GKUSnfaPzSgII26oC3K7roB
dyRg8cxA1GXGws/Zbtv12/9y+lkTCg/9LJHlgAE9P3knV4OVEJqUSuMhAj0pzGO3
F/xEjVlBSqsqhjC83np22M+OPDggYnRdQpVlopfBQ+wICVRXON4YhqqwB9NoYCD0
n7vm6WYtiNOz/ohJgP3WY8qvj1d5qrsbiaEzmxoe+qDk919UJ9MIx65ZtsrDHOgx
ntKJRQ1rOQZi6Ymqb0n1SJ2e1sqri06kAnuQEG0y7Bz4X2Cb7+Jprz7htadfI0LX
1ZWQFnzyAbT4MkmFPSep4w6MZZRJOt2x9Oh/iknM6u1AIt8NHxJ23CVKVeVMSeZg
MDFO018Yemj7IfTGE1OBEQWqGu+gdH3hWviv5DThB0BmRdqY4chZpVo6Ev55v2BF
73YKPLBwgP3aZOovmCqJFmWKGVjforO/kSRvPi6+yGFxV5dUfYVFUR6k3BZnAsU/
VxOwxvCpd4Bg2m6yp+k6x0G3yxznmd1KwtEV5QlQqNEkCmgXK0Pb0Qm6Ru5fsZup
qRxkyNRZfjderG7l8g7RvtsG+UolaEBVj/HHxPFxrEbiAOKqXyzMmOUN78VyHQbg
0ieKTKPhUndxNPfjNFBFaDvqXW9AliQu0NveS0dK6oGtFk8OEUk=
=FIN4
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/legoater/tags/pull-ppc-20220218' into staging

ppc-7.0 queue

* target/ppc: SPR registration cleanups (Fabiano)
* ppc: nested KVM HV for spapr virtual hypervisor (Nicholas)
* spapr: nvdimm: Introduce spapr-nvdimm device (Shivaprasad)

# gpg: Signature made Fri 18 Feb 2022 07:59:29 GMT
# gpg: using RSA key A0F66548F04895EBFE6B0B6051A343C7CFFBECA1
# gpg: Good signature from "Cédric Le Goater <clg@kaod.org>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: A0F6 6548 F048 95EB FE6B 0B60 51A3 43C7 CFFB ECA1

* remotes/legoater/tags/pull-ppc-20220218: (39 commits)
  target/ppc: Move common SPR functions out of cpu_init
  target/ppc: cpu_init: Move check_pow and QOM macros to a header
  target/ppc: cpu_init: Move SPR registration macros to a header
  target/ppc: cpu_init: Expose some SPR registration helpers
  target/ppc: Rename spr_tcg.h to spr_common.h
  target/ppc: cpu_init: Remove register_usprg3_sprs
  target/ppc: cpu_init: Rename register_ne_601_sprs
  target/ppc: cpu_init: Reuse init_proc_745 for the 755
  target/ppc: cpu_init: Reuse init_proc_604 for the 604e
  target/ppc: cpu_init: Reuse init_proc_603 for the e300
  target/ppc: cpu_init: Move 604e SPR registration into a function
  target/ppc: cpu_init: Move e300 SPR registration into a function
  target/ppc: cpu_init: Move 755 L2 cache SPRs into a function
  target/ppc: cpu_init: Deduplicate 7xx SPR registration
  target/ppc: cpu_init: Deduplicate 745/755 SPR registration
  target/ppc: cpu_init: Deduplicate 604 SPR registration
  target/ppc: cpu_init: Deduplicate 603 SPR registration
  target/ppc: cpu_init: Deduplicate 440 SPR registration
  target/ppc: cpu_init: Decouple 74xx SPR registration from 7xx
  target/ppc: cpu_init: Decouple G2 SPR registration from 755
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in: commit e670f6d825
@@ -181,10 +181,25 @@ static MemoryRegion *nvdimm_md_get_memory_region(MemoryDeviceState *md,

static void nvdimm_realize(PCDIMMDevice *dimm, Error **errp)
{
    NVDIMMDevice *nvdimm = NVDIMM(dimm);
    NVDIMMClass *ndc = NVDIMM_GET_CLASS(nvdimm);

    if (!nvdimm->nvdimm_mr) {
        nvdimm_prepare_memory_region(nvdimm, errp);
    }

    if (ndc->realize) {
        ndc->realize(nvdimm, errp);
    }
}

static void nvdimm_unrealize(PCDIMMDevice *dimm)
{
    NVDIMMDevice *nvdimm = NVDIMM(dimm);
    NVDIMMClass *ndc = NVDIMM_GET_CLASS(nvdimm);

    if (ndc->unrealize) {
        ndc->unrealize(nvdimm);
    }
}

/*

@@ -240,6 +255,7 @@ static void nvdimm_class_init(ObjectClass *oc, void *data)
    DeviceClass *dc = DEVICE_CLASS(oc);

    ddc->realize = nvdimm_realize;
    ddc->unrealize = nvdimm_unrealize;
    mdc->get_memory_region = nvdimm_md_get_memory_region;
    device_class_set_props(dc, nvdimm_properties);
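Note: the new NVDIMMClass hooks let a subclass attach extra realize/unrealize behaviour. A minimal sketch of such a subclass (the my_nvdimm_* names are hypothetical; the spapr-nvdimm device later in this series is the real user):

static void my_nvdimm_realize(NVDIMMDevice *nvdimm, Error **errp)
{
    /* subclass-specific setup; called from nvdimm_realize() above */
}

static void my_nvdimm_class_init(ObjectClass *oc, void *data)
{
    NVDIMMClass *nvc = NVDIMM_CLASS(oc);

    nvc->realize = my_nvdimm_realize;
}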
@@ -216,6 +216,11 @@ static void pc_dimm_realize(DeviceState *dev, Error **errp)
static void pc_dimm_unrealize(DeviceState *dev)
{
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);

    if (ddc->unrealize) {
        ddc->unrealize(dimm);
    }

    host_memory_backend_set_mapped(dimm->hostmem, false);
}
@@ -449,6 +449,11 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm,
    }
}

static bool pegasos2_cpu_in_nested(PowerPCCPU *cpu)
{
    return false;
}

static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    Pegasos2MachineState *pm = PEGASOS2_MACHINE(vhyp);

@@ -504,6 +509,7 @@ static void pegasos2_machine_class_init(ObjectClass *oc, void *data)
    mc->default_ram_id = "pegasos2.ram";
    mc->default_ram_size = 512 * MiB;

    vhc->cpu_in_nested = pegasos2_cpu_in_nested;
    vhc->hypercall = pegasos2_hypercall;
    vhc->cpu_exec_enter = vhyp_nop;
    vhc->cpu_exec_exit = vhyp_nop;
hw/ppc/ppc.c

@@ -1072,7 +1072,7 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
    if (env->has_hv_mode) {
    if (env->has_hv_mode && !cpu->vhyp) {
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
                                           cpu);
    } else {

@@ -1083,6 +1083,27 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
    return &cpu_ppc_set_tb_clk;
}

/* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
void cpu_ppc_hdecr_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    assert(env->tb_env->hdecr_timer == NULL);

    env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                            &cpu_ppc_hdecr_cb, cpu);
}

void cpu_ppc_hdecr_exit(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    timer_free(env->tb_env->hdecr_timer);
    env->tb_env->hdecr_timer = NULL;

    cpu_ppc_hdecr_lower(cpu);
}

/*****************************************************************************/
/* PowerPC 40x timers */
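Note: a usage sketch for the new helpers, mirroring what h_enter_nested()/spapr_exit_nested() below do (the surrounding context is illustrative only):

/* entering an L2 guest: create the HDEC timer and arm it */
cpu_ppc_hdecr_init(env);
cpu_ppc_store_hdecr(env, hdec);

/* ... the L2 runs under TCG ... */

/* leaving the L2: free the timer and lower any pending HDEC interrupt */
cpu_ppc_hdecr_exit(env);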
@@ -1270,6 +1270,8 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    g_assert(!vhyp_cpu_in_nested(cpu));

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;

@@ -1309,13 +1311,40 @@ void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
    }
}

static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry)
static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
                           target_ulong lpid, ppc_v3_pate_t *entry)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    /* Copy PATE1:GR into PATE0:HR */
    entry->dw0 = spapr->patb_entry & PATE0_HR;
    entry->dw1 = spapr->patb_entry;
    if (!spapr_cpu->in_nested) {
        assert(lpid == 0);

        /* Copy PATE1:GR into PATE0:HR */
        entry->dw0 = spapr->patb_entry & PATE0_HR;
        entry->dw1 = spapr->patb_entry;

    } else {
        uint64_t patb, pats;

        assert(lpid != 0);

        patb = spapr->nested_ptcr & PTCR_PATB;
        pats = spapr->nested_ptcr & PTCR_PATS;

        /* Calculate number of entries */
        pats = 1ull << (pats + 12 - 4);
        if (pats <= lpid) {
            return false;
        }

        /* Grab entry */
        patb += 16 * lpid;
        entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
        entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
    }

    return true;
}

#define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
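Note: a small worked example of the partition-table lookup above (values are illustrative, not from the patch):

/*
 * PTCR_PATS field = 4   ->  number of PATEs = 1ull << (4 + 12 - 4) = 4096
 * lpid = 5              ->  entry offset    = 16 * 5 = 0x50 bytes
 * so dw0 is read at patb + 0x50 and dw1 at patb + 0x58;
 * any lpid >= 4096 fails the bounds check and spapr_get_pate() returns false.
 */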
@@ -1634,6 +1663,8 @@ static void spapr_machine_reset(MachineState *machine)
        spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
    }

    spapr_nvdimm_finish_flushes();

    /* DRC reset may cause a device to be unplugged. This will cause troubles
     * if this device is used by another device (eg, a running vhost backend
     * will crash QEMU if the DIMM holding the vring goes away). To avoid such

@@ -4465,6 +4496,13 @@ PowerPCCPU *spapr_find_cpu(int vcpu_id)
    return NULL;
}

static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    return spapr_cpu->in_nested;
}

static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

@@ -4573,6 +4611,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->cpu_in_nested = spapr_cpu_in_nested;
    vhc->deliver_hv_excp = spapr_exit_nested;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
@@ -444,19 +444,23 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,
{
    ERRP_GUARD();
    PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
    CPUPPCState *env = &cpu->env;

    if (!val) {
        /* capability disabled by default */
        return;
    }

    if (tcg_enabled()) {
        error_setg(errp, "No Nested KVM-HV support in TCG");
    if (!(env->insns_flags2 & PPC2_ISA300)) {
        error_setg(errp, "Nested-HV only supported on POWER9 and later");
        error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n");
    } else if (kvm_enabled()) {
        return;
    }

    if (kvm_enabled()) {
        if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                              spapr->max_compat_pvr)) {
            error_setg(errp, "Nested KVM-HV only supported on POWER9");
            error_setg(errp, "Nested-HV only supported on POWER9 and later");
            error_append_hint(errp,
                              "Try appending -machine max-cpu-compat=power9\n");
            return;

@@ -464,7 +468,7 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,

    if (!kvmppc_has_cap_nested_kvm_hv()) {
        error_setg(errp,
                   "KVM implementation does not support Nested KVM-HV");
                   "KVM implementation does not support Nested-HV");
        error_append_hint(errp,
                          "Try appending -machine cap-nested-hv=off\n");
    } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) {
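Note: the capability and compat mode named in the hints above are set on the command line; an illustrative invocation (only cap-nested-hv and max-cpu-compat are taken from the hints, the rest is a typical pseries setup):

qemu-system-ppc64 -machine pseries,cap-nested-hv=on,max-cpu-compat=power9 ...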
@@ -261,12 +261,12 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
        return false;
    }

    /* Set time-base frequency to 512 MHz */
    cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ);

    cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
    kvmppc_set_papr(cpu);

    /* Set time-base frequency to 512 MHz. vhyp must be set first. */
    cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ);

    if (spapr_irq_cpu_intc_create(spapr, cpu, errp) < 0) {
        qdev_unrealize(DEVICE(cpu));
        return false;
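Note: the reordering matters because cpu_ppc_tb_init() now checks cpu->vhyp before deciding whether to create an HDEC timer (see the hw/ppc/ppc.c hunk earlier); the relevant test is:

/* in cpu_ppc_tb_init(): only non-vhyp CPUs with HV mode get an HDEC timer */
if (env->has_hv_mode && !cpu->vhyp) {
    tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb, cpu);
}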
@@ -9,6 +9,7 @@
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "mmu-hash64.h"
@ -1497,6 +1498,333 @@ static void hypercall_register_softmmu(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
/* TCG only */
|
||||
#define PRTS_MASK 0x1f
|
||||
|
||||
static target_ulong h_set_ptbl(PowerPCCPU *cpu,
|
||||
SpaprMachineState *spapr,
|
||||
target_ulong opcode,
|
||||
target_ulong *args)
|
||||
{
|
||||
target_ulong ptcr = args[0];
|
||||
|
||||
if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
|
||||
return H_FUNCTION;
|
||||
}
|
||||
|
||||
if ((ptcr & PRTS_MASK) + 12 - 4 > 12) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
spapr->nested_ptcr = ptcr; /* Save new partition table */
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong h_tlb_invalidate(PowerPCCPU *cpu,
|
||||
SpaprMachineState *spapr,
|
||||
target_ulong opcode,
|
||||
target_ulong *args)
|
||||
{
|
||||
/*
|
||||
* The spapr virtual hypervisor nested HV implementation retains no L2
|
||||
* translation state except for TLB. And the TLB is always invalidated
|
||||
* across L1<->L2 transitions, so nothing is required here.
|
||||
*/
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu,
|
||||
SpaprMachineState *spapr,
|
||||
target_ulong opcode,
|
||||
target_ulong *args)
|
||||
{
|
||||
/*
|
||||
* This HCALL is not required, L1 KVM will take a slow path and walk the
|
||||
* page tables manually to do the data copy.
|
||||
*/
|
||||
return H_FUNCTION;
|
||||
}
|
||||
|
||||
/*
|
||||
* When this handler returns, the environment is switched to the L2 guest
|
||||
* and TCG begins running that. spapr_exit_nested() performs the switch from
|
||||
* L2 back to L1 and returns from the H_ENTER_NESTED hcall.
|
||||
*/
|
||||
static target_ulong h_enter_nested(PowerPCCPU *cpu,
|
||||
SpaprMachineState *spapr,
|
||||
target_ulong opcode,
|
||||
target_ulong *args)
|
||||
{
|
||||
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
|
||||
target_ulong hv_ptr = args[0];
|
||||
target_ulong regs_ptr = args[1];
|
||||
target_ulong hdec, now = cpu_ppc_load_tbl(env);
|
||||
target_ulong lpcr, lpcr_mask;
|
||||
struct kvmppc_hv_guest_state *hvstate;
|
||||
struct kvmppc_hv_guest_state hv_state;
|
||||
struct kvmppc_pt_regs *regs;
|
||||
hwaddr len;
|
||||
uint64_t cr;
|
||||
int i;
|
||||
|
||||
if (spapr->nested_ptcr == 0) {
|
||||
return H_NOT_AVAILABLE;
|
||||
}
|
||||
|
||||
len = sizeof(*hvstate);
|
||||
hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
if (len != sizeof(*hvstate)) {
|
||||
address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false);
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
memcpy(&hv_state, hvstate, len);
|
||||
|
||||
address_space_unmap(CPU(cpu)->as, hvstate, len, len, false);
|
||||
|
||||
/*
|
||||
* We accept versions 1 and 2. Version 2 fields are unused because TCG
|
||||
* does not implement DAWR*.
|
||||
*/
|
||||
if (hv_state.version > HV_GUEST_STATE_VERSION) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
spapr_cpu->nested_host_state = g_try_malloc(sizeof(CPUPPCState));
|
||||
if (!spapr_cpu->nested_host_state) {
|
||||
return H_NO_MEM;
|
||||
}
|
||||
|
||||
memcpy(spapr_cpu->nested_host_state, env, sizeof(CPUPPCState));
|
||||
|
||||
len = sizeof(*regs);
|
||||
regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
if (!regs || len != sizeof(*regs)) {
|
||||
address_space_unmap(CPU(cpu)->as, regs, len, 0, false);
|
||||
g_free(spapr_cpu->nested_host_state);
|
||||
return H_P2;
|
||||
}
|
||||
|
||||
len = sizeof(env->gpr);
|
||||
assert(len == sizeof(regs->gpr));
|
||||
memcpy(env->gpr, regs->gpr, len);
|
||||
|
||||
env->lr = regs->link;
|
||||
env->ctr = regs->ctr;
|
||||
cpu_write_xer(env, regs->xer);
|
||||
|
||||
cr = regs->ccr;
|
||||
for (i = 7; i >= 0; i--) {
|
||||
env->crf[i] = cr & 15;
|
||||
cr >>= 4;
|
||||
}
|
||||
|
||||
env->msr = regs->msr;
|
||||
env->nip = regs->nip;
|
||||
|
||||
address_space_unmap(CPU(cpu)->as, regs, len, len, false);
|
||||
|
||||
env->cfar = hv_state.cfar;
|
||||
|
||||
assert(env->spr[SPR_LPIDR] == 0);
|
||||
env->spr[SPR_LPIDR] = hv_state.lpid;
|
||||
|
||||
lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;
|
||||
lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask);
|
||||
lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
|
||||
lpcr &= ~LPCR_LPES0;
|
||||
env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask;
|
||||
|
||||
env->spr[SPR_PCR] = hv_state.pcr;
|
||||
/* hv_state.amor is not used */
|
||||
env->spr[SPR_DPDES] = hv_state.dpdes;
|
||||
env->spr[SPR_HFSCR] = hv_state.hfscr;
|
||||
hdec = hv_state.hdec_expiry - now;
|
||||
spapr_cpu->nested_tb_offset = hv_state.tb_offset;
|
||||
/* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/
|
||||
env->spr[SPR_SRR0] = hv_state.srr0;
|
||||
env->spr[SPR_SRR1] = hv_state.srr1;
|
||||
env->spr[SPR_SPRG0] = hv_state.sprg[0];
|
||||
env->spr[SPR_SPRG1] = hv_state.sprg[1];
|
||||
env->spr[SPR_SPRG2] = hv_state.sprg[2];
|
||||
env->spr[SPR_SPRG3] = hv_state.sprg[3];
|
||||
env->spr[SPR_BOOKS_PID] = hv_state.pidr;
|
||||
env->spr[SPR_PPR] = hv_state.ppr;
|
||||
|
||||
cpu_ppc_hdecr_init(env);
|
||||
cpu_ppc_store_hdecr(env, hdec);
|
||||
|
||||
/*
|
||||
* The hv_state.vcpu_token is not needed. It is used by the KVM
|
||||
* implementation to remember which L2 vCPU last ran on which physical
|
||||
* CPU so as to invalidate process scope translations if it is moved
|
||||
* between physical CPUs. For now TLBs are always flushed on L1<->L2
|
||||
* transitions so this is not a problem.
|
||||
*
|
||||
* Could validate that the same vcpu_token does not attempt to run on
|
||||
* different L1 vCPUs at the same time, but that would be a L1 KVM bug
|
||||
* and it's not obviously worth a new data structure to do it.
|
||||
*/
|
||||
|
||||
env->tb_env->tb_offset += spapr_cpu->nested_tb_offset;
|
||||
spapr_cpu->in_nested = true;
|
||||
|
||||
hreg_compute_hflags(env);
|
||||
tlb_flush(cs);
|
||||
env->reserve_addr = -1; /* Reset the reservation */
|
||||
|
||||
/*
|
||||
* The spapr hcall helper sets env->gpr[3] to the return value, but at
|
||||
* this point the L1 is not returning from the hcall but rather we
|
||||
* start running the L2, so r3 must not be clobbered, so return env->gpr[3]
|
||||
* to leave it unchanged.
|
||||
*/
|
||||
return env->gpr[3];
|
||||
}
|
||||
|
||||
void spapr_exit_nested(PowerPCCPU *cpu, int excp)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
|
||||
target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
|
||||
target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
|
||||
target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
|
||||
struct kvmppc_hv_guest_state *hvstate;
|
||||
struct kvmppc_pt_regs *regs;
|
||||
hwaddr len;
|
||||
uint64_t cr;
|
||||
int i;
|
||||
|
||||
assert(spapr_cpu->in_nested);
|
||||
|
||||
cpu_ppc_hdecr_exit(env);
|
||||
|
||||
len = sizeof(*hvstate);
|
||||
hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
if (len != sizeof(*hvstate)) {
|
||||
address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
|
||||
r3_return = H_PARAMETER;
|
||||
goto out_restore_l1;
|
||||
}
|
||||
|
||||
hvstate->cfar = env->cfar;
|
||||
hvstate->lpcr = env->spr[SPR_LPCR];
|
||||
hvstate->pcr = env->spr[SPR_PCR];
|
||||
hvstate->dpdes = env->spr[SPR_DPDES];
|
||||
hvstate->hfscr = env->spr[SPR_HFSCR];
|
||||
|
||||
if (excp == POWERPC_EXCP_HDSI) {
|
||||
hvstate->hdar = env->spr[SPR_HDAR];
|
||||
hvstate->hdsisr = env->spr[SPR_HDSISR];
|
||||
hvstate->asdr = env->spr[SPR_ASDR];
|
||||
} else if (excp == POWERPC_EXCP_HISI) {
|
||||
hvstate->asdr = env->spr[SPR_ASDR];
|
||||
}
|
||||
|
||||
/* HEIR should be implemented for HV mode and saved here. */
|
||||
hvstate->srr0 = env->spr[SPR_SRR0];
|
||||
hvstate->srr1 = env->spr[SPR_SRR1];
|
||||
hvstate->sprg[0] = env->spr[SPR_SPRG0];
|
||||
hvstate->sprg[1] = env->spr[SPR_SPRG1];
|
||||
hvstate->sprg[2] = env->spr[SPR_SPRG2];
|
||||
hvstate->sprg[3] = env->spr[SPR_SPRG3];
|
||||
hvstate->pidr = env->spr[SPR_BOOKS_PID];
|
||||
hvstate->ppr = env->spr[SPR_PPR];
|
||||
|
||||
/* Is it okay to specify write length larger than actual data written? */
|
||||
address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);
|
||||
|
||||
len = sizeof(*regs);
|
||||
regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
if (!regs || len != sizeof(*regs)) {
|
||||
address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
|
||||
r3_return = H_P2;
|
||||
goto out_restore_l1;
|
||||
}
|
||||
|
||||
len = sizeof(env->gpr);
|
||||
assert(len == sizeof(regs->gpr));
|
||||
memcpy(regs->gpr, env->gpr, len);
|
||||
|
||||
regs->link = env->lr;
|
||||
regs->ctr = env->ctr;
|
||||
regs->xer = cpu_read_xer(env);
|
||||
|
||||
cr = 0;
|
||||
for (i = 0; i < 8; i++) {
|
||||
cr |= (env->crf[i] & 15) << (4 * (7 - i));
|
||||
}
|
||||
regs->ccr = cr;
|
||||
|
||||
if (excp == POWERPC_EXCP_MCHECK ||
|
||||
excp == POWERPC_EXCP_RESET ||
|
||||
excp == POWERPC_EXCP_SYSCALL) {
|
||||
regs->nip = env->spr[SPR_SRR0];
|
||||
regs->msr = env->spr[SPR_SRR1] & env->msr_mask;
|
||||
} else {
|
||||
regs->nip = env->spr[SPR_HSRR0];
|
||||
regs->msr = env->spr[SPR_HSRR1] & env->msr_mask;
|
||||
}
|
||||
|
||||
/* Is it okay to specify write length larger than actual data written? */
|
||||
address_space_unmap(CPU(cpu)->as, regs, len, len, true);
|
||||
|
||||
out_restore_l1:
|
||||
memcpy(env->gpr, spapr_cpu->nested_host_state->gpr, sizeof(env->gpr));
|
||||
env->lr = spapr_cpu->nested_host_state->lr;
|
||||
env->ctr = spapr_cpu->nested_host_state->ctr;
|
||||
memcpy(env->crf, spapr_cpu->nested_host_state->crf, sizeof(env->crf));
|
||||
env->cfar = spapr_cpu->nested_host_state->cfar;
|
||||
env->xer = spapr_cpu->nested_host_state->xer;
|
||||
env->so = spapr_cpu->nested_host_state->so;
|
||||
env->ov = spapr_cpu->nested_host_state->ov;
|
||||
env->ov32 = spapr_cpu->nested_host_state->ov32;
|
||||
env->ca32 = spapr_cpu->nested_host_state->ca32;
|
||||
env->msr = spapr_cpu->nested_host_state->msr;
|
||||
env->nip = spapr_cpu->nested_host_state->nip;
|
||||
|
||||
assert(env->spr[SPR_LPIDR] != 0);
|
||||
env->spr[SPR_LPCR] = spapr_cpu->nested_host_state->spr[SPR_LPCR];
|
||||
env->spr[SPR_LPIDR] = spapr_cpu->nested_host_state->spr[SPR_LPIDR];
|
||||
env->spr[SPR_PCR] = spapr_cpu->nested_host_state->spr[SPR_PCR];
|
||||
env->spr[SPR_DPDES] = 0;
|
||||
env->spr[SPR_HFSCR] = spapr_cpu->nested_host_state->spr[SPR_HFSCR];
|
||||
env->spr[SPR_SRR0] = spapr_cpu->nested_host_state->spr[SPR_SRR0];
|
||||
env->spr[SPR_SRR1] = spapr_cpu->nested_host_state->spr[SPR_SRR1];
|
||||
env->spr[SPR_SPRG0] = spapr_cpu->nested_host_state->spr[SPR_SPRG0];
|
||||
env->spr[SPR_SPRG1] = spapr_cpu->nested_host_state->spr[SPR_SPRG1];
|
||||
env->spr[SPR_SPRG2] = spapr_cpu->nested_host_state->spr[SPR_SPRG2];
|
||||
env->spr[SPR_SPRG3] = spapr_cpu->nested_host_state->spr[SPR_SPRG3];
|
||||
env->spr[SPR_BOOKS_PID] = spapr_cpu->nested_host_state->spr[SPR_BOOKS_PID];
|
||||
env->spr[SPR_PPR] = spapr_cpu->nested_host_state->spr[SPR_PPR];
|
||||
|
||||
/*
|
||||
* Return the interrupt vector address from H_ENTER_NESTED to the L1
|
||||
* (or error code).
|
||||
*/
|
||||
env->gpr[3] = r3_return;
|
||||
|
||||
env->tb_env->tb_offset -= spapr_cpu->nested_tb_offset;
|
||||
spapr_cpu->in_nested = false;
|
||||
|
||||
hreg_compute_hflags(env);
|
||||
tlb_flush(cs);
|
||||
env->reserve_addr = -1; /* Reset the reservation */
|
||||
|
||||
g_free(spapr_cpu->nested_host_state);
|
||||
spapr_cpu->nested_host_state = NULL;
|
||||
}
|
||||
|
||||
static void hypercall_register_types(void)
|
||||
{
|
||||
hypercall_register_softmmu();
|
||||
|
@ -1552,6 +1880,11 @@ static void hypercall_register_types(void)
|
|||
spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
|
||||
|
||||
spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
|
||||
|
||||
spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl);
|
||||
spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested);
|
||||
spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate);
|
||||
spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest);
|
||||
}
|
||||
|
||||
type_init(hypercall_register_types)
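Note: a rough sketch of how an L1 hypervisor drives these hcalls (the hcall() wrapper and __pa() are hypothetical; the argument layout, hv_state in args[0] and pt_regs in args[1] with the exit vector returned in r3, comes from h_enter_nested() and spapr_exit_nested() above):

/* L1 side, illustrative only */
struct kvmppc_hv_guest_state hv_state = { .version = HV_GUEST_STATE_VERSION /* ... */ };
struct kvmppc_pt_regs l2_regs = { /* GPRs, NIP, MSR, ... of the L2 vCPU */ };

long trap = hcall(KVMPPC_H_ENTER_NESTED, __pa(&hv_state), __pa(&l2_regs));

/* on return, hv_state and l2_regs hold the L2 exit state and "trap" is the
 * HV interrupt vector (or hcall error) that ended the L2's execution */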
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
* THE SOFTWARE.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qapi/error.h"
|
||||
#include "hw/ppc/spapr_drc.h"
|
||||
#include "hw/ppc/spapr_nvdimm.h"
|
||||
|
@ -30,6 +31,10 @@
|
|||
#include "hw/ppc/fdt.h"
|
||||
#include "qemu/range.h"
|
||||
#include "hw/ppc/spapr_numa.h"
|
||||
#include "block/thread-pool.h"
|
||||
#include "migration/vmstate.h"
|
||||
#include "qemu/pmem.h"
|
||||
#include "hw/qdev-properties.h"
|
||||
|
||||
/* DIMM health bitmap bitmap indicators. Taken from kernel's papr_scm.c */
|
||||
/* SCM device is unable to persist memory contents */
|
||||
|
@ -47,11 +52,25 @@
|
|||
/* Have an explicit check for alignment */
|
||||
QEMU_BUILD_BUG_ON(SPAPR_MINIMUM_SCM_BLOCK_SIZE % SPAPR_MEMORY_BLOCK_SIZE);
|
||||
|
||||
#define TYPE_SPAPR_NVDIMM "spapr-nvdimm"
|
||||
OBJECT_DECLARE_TYPE(SpaprNVDIMMDevice, SPAPRNVDIMMClass, SPAPR_NVDIMM)
|
||||
|
||||
struct SPAPRNVDIMMClass {
|
||||
/* private */
|
||||
NVDIMMClass parent_class;
|
||||
|
||||
/* public */
|
||||
void (*realize)(NVDIMMDevice *dimm, Error **errp);
|
||||
void (*unrealize)(NVDIMMDevice *dimm, Error **errp);
|
||||
};
|
||||
|
||||
bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
|
||||
uint64_t size, Error **errp)
|
||||
{
|
||||
const MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
|
||||
const MachineState *ms = MACHINE(hotplug_dev);
|
||||
PCDIMMDevice *dimm = PC_DIMM(nvdimm);
|
||||
MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem);
|
||||
g_autofree char *uuidstr = NULL;
|
||||
QemuUUID uuid;
|
||||
int ret;
|
||||
|
@ -89,6 +108,14 @@ bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
|
|||
return false;
|
||||
}
|
||||
|
||||
if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM) &&
|
||||
(memory_region_get_fd(mr) < 0)) {
|
||||
error_setg(errp, "spapr-nvdimm device requires the "
|
||||
"memdev %s to be of memory-backend-file type",
|
||||
object_get_canonical_path_component(OBJECT(dimm->hostmem)));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -160,6 +187,20 @@ static int spapr_dt_nvdimm(SpaprMachineState *spapr, void *fdt,
|
|||
"operating-system")));
|
||||
_FDT(fdt_setprop(fdt, child_offset, "ibm,cache-flush-required", NULL, 0));
|
||||
|
||||
if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM)) {
|
||||
bool is_pmem = false, pmem_override = false;
|
||||
PCDIMMDevice *dimm = PC_DIMM(nvdimm);
|
||||
HostMemoryBackend *hostmem = dimm->hostmem;
|
||||
|
||||
is_pmem = object_property_get_bool(OBJECT(hostmem), "pmem", NULL);
|
||||
pmem_override = object_property_get_bool(OBJECT(nvdimm),
|
||||
"pmem-override", NULL);
|
||||
if (!is_pmem || pmem_override) {
|
||||
_FDT(fdt_setprop(fdt, child_offset, "ibm,hcall-flush-required",
|
||||
NULL, 0));
|
||||
}
|
||||
}
|
||||
|
||||
return child_offset;
|
||||
}
|
||||
|
||||
|
@ -375,6 +416,293 @@ static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
|||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
typedef struct SpaprNVDIMMDeviceFlushState {
|
||||
uint64_t continue_token;
|
||||
int64_t hcall_ret;
|
||||
uint32_t drcidx;
|
||||
|
||||
QLIST_ENTRY(SpaprNVDIMMDeviceFlushState) node;
|
||||
} SpaprNVDIMMDeviceFlushState;
|
||||
|
||||
typedef struct SpaprNVDIMMDevice SpaprNVDIMMDevice;
|
||||
struct SpaprNVDIMMDevice {
|
||||
/* private */
|
||||
NVDIMMDevice parent_obj;
|
||||
|
||||
bool hcall_flush_required;
|
||||
uint64_t nvdimm_flush_token;
|
||||
QLIST_HEAD(, SpaprNVDIMMDeviceFlushState) pending_nvdimm_flush_states;
|
||||
QLIST_HEAD(, SpaprNVDIMMDeviceFlushState) completed_nvdimm_flush_states;
|
||||
|
||||
/* public */
|
||||
|
||||
    /*
     * The 'on' value for this property forces QEMU to enable the hcall
     * flush for the nvdimm device even if the backend is a pmem.
     */
|
||||
bool pmem_override;
|
||||
};
|
||||
|
||||
static int flush_worker_cb(void *opaque)
|
||||
{
|
||||
SpaprNVDIMMDeviceFlushState *state = opaque;
|
||||
SpaprDrc *drc = spapr_drc_by_index(state->drcidx);
|
||||
PCDIMMDevice *dimm = PC_DIMM(drc->dev);
|
||||
HostMemoryBackend *backend = MEMORY_BACKEND(dimm->hostmem);
|
||||
int backend_fd = memory_region_get_fd(&backend->mr);
|
||||
|
||||
if (object_property_get_bool(OBJECT(backend), "pmem", NULL)) {
|
||||
MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem);
|
||||
void *ptr = memory_region_get_ram_ptr(mr);
|
||||
size_t size = object_property_get_uint(OBJECT(dimm), PC_DIMM_SIZE_PROP,
|
||||
NULL);
|
||||
|
||||
/* flush pmem backend */
|
||||
pmem_persist(ptr, size);
|
||||
} else {
|
||||
/* flush raw backing image */
|
||||
if (qemu_fdatasync(backend_fd) < 0) {
|
||||
error_report("papr_scm: Could not sync nvdimm to backend file: %s",
|
||||
strerror(errno));
|
||||
return H_HARDWARE;
|
||||
}
|
||||
}
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static void spapr_nvdimm_flush_completion_cb(void *opaque, int hcall_ret)
|
||||
{
|
||||
SpaprNVDIMMDeviceFlushState *state = opaque;
|
||||
SpaprDrc *drc = spapr_drc_by_index(state->drcidx);
|
||||
SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(drc->dev);
|
||||
|
||||
state->hcall_ret = hcall_ret;
|
||||
QLIST_REMOVE(state, node);
|
||||
QLIST_INSERT_HEAD(&s_nvdimm->completed_nvdimm_flush_states, state, node);
|
||||
}
|
||||
|
||||
static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
|
||||
{
|
||||
SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque;
|
||||
SpaprNVDIMMDeviceFlushState *state;
|
||||
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
|
||||
HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem);
|
||||
bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
|
||||
bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm),
|
||||
"pmem-override", NULL);
|
||||
bool dest_hcall_flush_required = pmem_override || !is_pmem;
|
||||
|
||||
if (!s_nvdimm->hcall_flush_required && dest_hcall_flush_required) {
|
||||
error_report("The file backend for the spapr-nvdimm device %s at "
|
||||
"source is a pmem, use pmem=on and pmem-override=off to "
|
||||
"continue.", DEVICE(s_nvdimm)->id);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (s_nvdimm->hcall_flush_required && !dest_hcall_flush_required) {
|
||||
error_report("The guest expects hcall-flush support for the "
|
||||
"spapr-nvdimm device %s, use pmem_override=on to "
|
||||
"continue.", DEVICE(s_nvdimm)->id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) {
|
||||
thread_pool_submit_aio(pool, flush_worker_cb, state,
|
||||
spapr_nvdimm_flush_completion_cb, state);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_spapr_nvdimm_flush_state = {
|
||||
.name = "spapr_nvdimm_flush_state",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT64(continue_token, SpaprNVDIMMDeviceFlushState),
|
||||
VMSTATE_INT64(hcall_ret, SpaprNVDIMMDeviceFlushState),
|
||||
VMSTATE_UINT32(drcidx, SpaprNVDIMMDeviceFlushState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
};
|
||||
|
||||
const VMStateDescription vmstate_spapr_nvdimm_states = {
|
||||
.name = "spapr_nvdimm_states",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.post_load = spapr_nvdimm_flush_post_load,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_BOOL(hcall_flush_required, SpaprNVDIMMDevice),
|
||||
VMSTATE_UINT64(nvdimm_flush_token, SpaprNVDIMMDevice),
|
||||
VMSTATE_QLIST_V(completed_nvdimm_flush_states, SpaprNVDIMMDevice, 1,
|
||||
vmstate_spapr_nvdimm_flush_state,
|
||||
SpaprNVDIMMDeviceFlushState, node),
|
||||
VMSTATE_QLIST_V(pending_nvdimm_flush_states, SpaprNVDIMMDevice, 1,
|
||||
vmstate_spapr_nvdimm_flush_state,
|
||||
SpaprNVDIMMDeviceFlushState, node),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* Assign a token and reserve it for the new flush state.
|
||||
*/
|
||||
static SpaprNVDIMMDeviceFlushState *spapr_nvdimm_init_new_flush_state(
|
||||
SpaprNVDIMMDevice *spapr_nvdimm)
|
||||
{
|
||||
SpaprNVDIMMDeviceFlushState *state;
|
||||
|
||||
state = g_malloc0(sizeof(*state));
|
||||
|
||||
spapr_nvdimm->nvdimm_flush_token++;
|
||||
/* Token zero is presumed as no job pending. Assert on overflow to zero */
|
||||
g_assert(spapr_nvdimm->nvdimm_flush_token != 0);
|
||||
|
||||
state->continue_token = spapr_nvdimm->nvdimm_flush_token;
|
||||
|
||||
QLIST_INSERT_HEAD(&spapr_nvdimm->pending_nvdimm_flush_states, state, node);
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
/*
 * spapr_nvdimm_finish_flushes
 * Waits for all pending flush requests to complete their execution and
 * frees the flush states.
 */
|
||||
void spapr_nvdimm_finish_flushes(void)
|
||||
{
|
||||
SpaprNVDIMMDeviceFlushState *state, *next;
|
||||
GSList *list, *nvdimms;
|
||||
|
||||
    /*
     * Called on the reset path: the main loop thread, which normally runs
     * the pending BHs, has left the main loop to run the reset path and
     * finally reaches here. The other caller is the guest's
     * h_client_architecture_support hcall, which happens early during boot.
     */
|
||||
nvdimms = nvdimm_get_device_list();
|
||||
for (list = nvdimms; list; list = list->next) {
|
||||
NVDIMMDevice *nvdimm = list->data;
|
||||
if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM)) {
|
||||
SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(nvdimm);
|
||||
while (!QLIST_EMPTY(&s_nvdimm->pending_nvdimm_flush_states)) {
|
||||
aio_poll(qemu_get_aio_context(), true);
|
||||
}
|
||||
|
||||
QLIST_FOREACH_SAFE(state, &s_nvdimm->completed_nvdimm_flush_states,
|
||||
node, next) {
|
||||
QLIST_REMOVE(state, node);
|
||||
g_free(state);
|
||||
}
|
||||
}
|
||||
}
|
||||
g_slist_free(nvdimms);
|
||||
}
|
||||
|
||||
/*
|
||||
* spapr_nvdimm_get_flush_status
|
||||
* Fetches the status of the hcall worker and returns
|
||||
* H_LONG_BUSY_ORDER_10_MSEC if the worker is still running.
|
||||
*/
|
||||
static int spapr_nvdimm_get_flush_status(SpaprNVDIMMDevice *s_nvdimm,
|
||||
uint64_t token)
|
||||
{
|
||||
SpaprNVDIMMDeviceFlushState *state, *node;
|
||||
|
||||
QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) {
|
||||
if (state->continue_token == token) {
|
||||
return H_LONG_BUSY_ORDER_10_MSEC;
|
||||
}
|
||||
}
|
||||
|
||||
QLIST_FOREACH_SAFE(state, &s_nvdimm->completed_nvdimm_flush_states,
|
||||
node, node) {
|
||||
if (state->continue_token == token) {
|
||||
int ret = state->hcall_ret;
|
||||
QLIST_REMOVE(state, node);
|
||||
g_free(state);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* If not found in complete list too, invalid token */
|
||||
return H_P2;
|
||||
}
|
||||
|
||||
/*
 * H_SCM_FLUSH
 * Input: drc_index, continue-token
 * Out: continue-token
 * Return Value: H_SUCCESS, H_PARAMETER, H_P2, H_LONG_BUSY_ORDER_10_MSEC,
 *               H_UNSUPPORTED
 *
 * Given a DRC index, flush the data to the backend NVDIMM device. The hcall
 * returns H_LONG_BUSY_ORDER_10_MSEC when the flush takes longer and needs to
 * be issued multiple times in order to be completely serviced. The
 * continue-token from the output is to be passed in the argument list of
 * subsequent hcalls until the hcall is completely serviced, at which point
 * H_SUCCESS or another error is returned.
 */
|
||||
static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
int ret;
|
||||
uint32_t drc_index = args[0];
|
||||
uint64_t continue_token = args[1];
|
||||
SpaprDrc *drc = spapr_drc_by_index(drc_index);
|
||||
PCDIMMDevice *dimm;
|
||||
HostMemoryBackend *backend = NULL;
|
||||
SpaprNVDIMMDeviceFlushState *state;
|
||||
ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
|
||||
int fd;
|
||||
|
||||
if (!drc || !drc->dev ||
|
||||
spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
dimm = PC_DIMM(drc->dev);
|
||||
if (!object_dynamic_cast(OBJECT(dimm), TYPE_SPAPR_NVDIMM)) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
if (continue_token == 0) {
|
||||
bool is_pmem = false, pmem_override = false;
|
||||
backend = MEMORY_BACKEND(dimm->hostmem);
|
||||
fd = memory_region_get_fd(&backend->mr);
|
||||
|
||||
if (fd < 0) {
|
||||
return H_UNSUPPORTED;
|
||||
}
|
||||
|
||||
is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
|
||||
pmem_override = object_property_get_bool(OBJECT(dimm),
|
||||
"pmem-override", NULL);
|
||||
if (is_pmem && !pmem_override) {
|
||||
return H_UNSUPPORTED;
|
||||
}
|
||||
|
||||
state = spapr_nvdimm_init_new_flush_state(SPAPR_NVDIMM(dimm));
|
||||
if (!state) {
|
||||
return H_HARDWARE;
|
||||
}
|
||||
|
||||
state->drcidx = drc_index;
|
||||
|
||||
thread_pool_submit_aio(pool, flush_worker_cb, state,
|
||||
spapr_nvdimm_flush_completion_cb, state);
|
||||
|
||||
continue_token = state->continue_token;
|
||||
}
|
||||
|
||||
ret = spapr_nvdimm_get_flush_status(SPAPR_NVDIMM(dimm), continue_token);
|
||||
if (H_IS_LONG_BUSY(ret)) {
|
||||
args[0] = continue_token;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
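Note: a guest is expected to poll with the returned continue-token until the flush completes; a rough sketch of the retry loop described in the H_SCM_FLUSH comment above (the hcall wrapper name is hypothetical):

uint64_t token = 0;
int64_t rc;

do {
    rc = hcall_scm_flush(drc_index, token, &token);   /* hypothetical wrapper */
} while (H_IS_LONG_BUSY(rc));                         /* retry with the new token */

if (rc != H_SUCCESS) {
    /* H_PARAMETER, H_P2, H_UNSUPPORTED or H_HARDWARE */
}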
|
||||
|
||||
static target_ulong h_scm_unbind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
|
@ -523,6 +851,70 @@ static void spapr_scm_register_types(void)
|
|||
spapr_register_hypercall(H_SCM_UNBIND_MEM, h_scm_unbind_mem);
|
||||
spapr_register_hypercall(H_SCM_UNBIND_ALL, h_scm_unbind_all);
|
||||
spapr_register_hypercall(H_SCM_HEALTH, h_scm_health);
|
||||
spapr_register_hypercall(H_SCM_FLUSH, h_scm_flush);
|
||||
}
|
||||
|
||||
type_init(spapr_scm_register_types)
|
||||
|
||||
static void spapr_nvdimm_realize(NVDIMMDevice *dimm, Error **errp)
|
||||
{
|
||||
SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(dimm);
|
||||
HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(dimm)->hostmem);
|
||||
bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
|
||||
bool pmem_override = object_property_get_bool(OBJECT(dimm), "pmem-override",
|
||||
NULL);
|
||||
if (!is_pmem || pmem_override) {
|
||||
s_nvdimm->hcall_flush_required = true;
|
||||
}
|
||||
|
||||
vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY,
|
||||
&vmstate_spapr_nvdimm_states, dimm);
|
||||
}
|
||||
|
||||
static void spapr_nvdimm_unrealize(NVDIMMDevice *dimm)
|
||||
{
|
||||
vmstate_unregister(NULL, &vmstate_spapr_nvdimm_states, dimm);
|
||||
}
|
||||
|
||||
static Property spapr_nvdimm_properties[] = {
|
||||
#ifdef CONFIG_LIBPMEM
|
||||
DEFINE_PROP_BOOL("pmem-override", SpaprNVDIMMDevice, pmem_override, false),
|
||||
#endif
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static void spapr_nvdimm_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
NVDIMMClass *nvc = NVDIMM_CLASS(oc);
|
||||
|
||||
nvc->realize = spapr_nvdimm_realize;
|
||||
nvc->unrealize = spapr_nvdimm_unrealize;
|
||||
|
||||
device_class_set_props(dc, spapr_nvdimm_properties);
|
||||
}
|
||||
|
||||
static void spapr_nvdimm_init(Object *obj)
|
||||
{
|
||||
SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(obj);
|
||||
|
||||
s_nvdimm->hcall_flush_required = false;
|
||||
QLIST_INIT(&s_nvdimm->pending_nvdimm_flush_states);
|
||||
QLIST_INIT(&s_nvdimm->completed_nvdimm_flush_states);
|
||||
}
|
||||
|
||||
static TypeInfo spapr_nvdimm_info = {
|
||||
.name = TYPE_SPAPR_NVDIMM,
|
||||
.parent = TYPE_NVDIMM,
|
||||
.class_init = spapr_nvdimm_class_init,
|
||||
.class_size = sizeof(SPAPRNVDIMMClass),
|
||||
.instance_size = sizeof(SpaprNVDIMMDevice),
|
||||
.instance_init = spapr_nvdimm_init,
|
||||
};
|
||||
|
||||
static void spapr_nvdimm_register_types(void)
|
||||
{
|
||||
type_register_static(&spapr_nvdimm_info);
|
||||
}
|
||||
|
||||
type_init(spapr_nvdimm_register_types)
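Note: the new device pairs with a file-backed memory backend; an illustrative command line (paths, sizes and the usual nvdimm machine/memory options are examples only):

qemu-system-ppc64 -machine pseries,nvdimm=on -m 4G,slots=2,maxmem=8G \
    -object memory-backend-file,id=mem1,mem-path=/path/to/backing.img,size=4G \
    -device spapr-nvdimm,id=nvdimm1,memdev=mem1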
|
||||
|
|
|
@ -103,6 +103,8 @@ struct NVDIMMClass {
|
|||
/* write @size bytes from @buf to NVDIMM label data at @offset. */
|
||||
void (*write_label_data)(NVDIMMDevice *nvdimm, const void *buf,
|
||||
uint64_t size, uint64_t offset);
|
||||
void (*realize)(NVDIMMDevice *nvdimm, Error **errp);
|
||||
void (*unrealize)(NVDIMMDevice *nvdimm);
|
||||
};
|
||||
|
||||
#define NVDIMM_DSM_MEM_FILE "etc/acpi/nvdimm-mem"
|
||||
|
|
|
@ -63,6 +63,7 @@ struct PCDIMMDeviceClass {
|
|||
|
||||
/* public */
|
||||
void (*realize)(PCDIMMDevice *dimm, Error **errp);
|
||||
void (*unrealize)(PCDIMMDevice *dimm);
|
||||
};
|
||||
|
||||
void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,
|
||||
|
|
|
@ -54,6 +54,9 @@ struct ppc_tb_t {
|
|||
|
||||
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset);
|
||||
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq);
|
||||
void cpu_ppc_hdecr_init(CPUPPCState *env);
|
||||
void cpu_ppc_hdecr_exit(CPUPPCState *env);
|
||||
|
||||
/* Embedded PowerPC DCR management */
|
||||
typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn);
|
||||
typedef void (*dcr_write_cb)(void *opaque, int dcrn, uint32_t val);
|
||||
|
|
|
@ -197,6 +197,9 @@ struct SpaprMachineState {
|
|||
bool has_graphics;
|
||||
uint32_t vsmt; /* Virtual SMT mode (KVM's "core stride") */
|
||||
|
||||
/* Nested HV support (TCG only) */
|
||||
uint64_t nested_ptcr;
|
||||
|
||||
Notifier epow_notifier;
|
||||
QTAILQ_HEAD(, SpaprEventLogEntry) pending_events;
|
||||
bool use_hotplug_event_source;
|
||||
|
@ -341,6 +344,7 @@ struct SpaprMachineState {
|
|||
#define H_P7 -60
|
||||
#define H_P8 -61
|
||||
#define H_P9 -62
|
||||
#define H_UNSUPPORTED -67
|
||||
#define H_OVERLAP -68
|
||||
#define H_UNSUPPORTED_FLAG -256
|
||||
#define H_MULTI_THREADS_ACTIVE -9005
|
||||
|
@ -559,8 +563,9 @@ struct SpaprMachineState {
|
|||
#define H_SCM_UNBIND_ALL 0x3FC
|
||||
#define H_SCM_HEALTH 0x400
|
||||
#define H_RPT_INVALIDATE 0x448
|
||||
#define H_SCM_FLUSH 0x44C
|
||||
|
||||
#define MAX_HCALL_OPCODE H_RPT_INVALIDATE
|
||||
#define MAX_HCALL_OPCODE H_SCM_FLUSH
|
||||
|
||||
/* The hcalls above are standardized in PAPR and implemented by pHyp
|
||||
* as well.
|
||||
|
@ -577,7 +582,14 @@ struct SpaprMachineState {
|
|||
#define KVMPPC_H_UPDATE_DT (KVMPPC_HCALL_BASE + 0x3)
|
||||
/* 0x4 was used for KVMPPC_H_UPDATE_PHANDLE in SLOF */
|
||||
#define KVMPPC_H_VOF_CLIENT (KVMPPC_HCALL_BASE + 0x5)
|
||||
#define KVMPPC_HCALL_MAX KVMPPC_H_VOF_CLIENT
|
||||
|
||||
/* Platform-specific hcalls used for nested HV KVM */
|
||||
#define KVMPPC_H_SET_PARTITION_TABLE (KVMPPC_HCALL_BASE + 0x800)
|
||||
#define KVMPPC_H_ENTER_NESTED (KVMPPC_HCALL_BASE + 0x804)
|
||||
#define KVMPPC_H_TLB_INVALIDATE (KVMPPC_HCALL_BASE + 0x808)
|
||||
#define KVMPPC_H_COPY_TOFROM_GUEST (KVMPPC_HCALL_BASE + 0x80C)
|
||||
|
||||
#define KVMPPC_HCALL_MAX KVMPPC_H_COPY_TOFROM_GUEST
|
||||
|
||||
/*
|
||||
* The hcall range 0xEF00 to 0xEF80 is reserved for use in facilitating
|
||||
|
@ -587,6 +599,65 @@ struct SpaprMachineState {
|
|||
#define SVM_H_TPM_COMM 0xEF10
|
||||
#define SVM_HCALL_MAX SVM_H_TPM_COMM
|
||||
|
||||
/*
|
||||
* Register state for entering a nested guest with H_ENTER_NESTED.
|
||||
* New member must be added at the end.
|
||||
*/
|
||||
struct kvmppc_hv_guest_state {
|
||||
uint64_t version; /* version of this structure layout, must be first */
|
||||
uint32_t lpid;
|
||||
uint32_t vcpu_token;
|
||||
/* These registers are hypervisor privileged (at least for writing) */
|
||||
uint64_t lpcr;
|
||||
uint64_t pcr;
|
||||
uint64_t amor;
|
||||
uint64_t dpdes;
|
||||
uint64_t hfscr;
|
||||
int64_t tb_offset;
|
||||
uint64_t dawr0;
|
||||
uint64_t dawrx0;
|
||||
uint64_t ciabr;
|
||||
uint64_t hdec_expiry;
|
||||
uint64_t purr;
|
||||
uint64_t spurr;
|
||||
uint64_t ic;
|
||||
uint64_t vtb;
|
||||
uint64_t hdar;
|
||||
uint64_t hdsisr;
|
||||
uint64_t heir;
|
||||
uint64_t asdr;
|
||||
/* These are OS privileged but need to be set late in guest entry */
|
||||
uint64_t srr0;
|
||||
uint64_t srr1;
|
||||
uint64_t sprg[4];
|
||||
uint64_t pidr;
|
||||
uint64_t cfar;
|
||||
uint64_t ppr;
|
||||
/* Version 1 ends here */
|
||||
uint64_t dawr1;
|
||||
uint64_t dawrx1;
|
||||
/* Version 2 ends here */
|
||||
};
|
||||
|
||||
/* Latest version of hv_guest_state structure */
|
||||
#define HV_GUEST_STATE_VERSION 2
|
||||
|
||||
/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
|
||||
struct kvmppc_pt_regs {
|
||||
uint64_t gpr[32];
|
||||
uint64_t nip;
|
||||
uint64_t msr;
|
||||
uint64_t orig_gpr3; /* Used for restarting system calls */
|
||||
uint64_t ctr;
|
||||
uint64_t link;
|
||||
uint64_t xer;
|
||||
uint64_t ccr;
|
||||
uint64_t softe; /* Soft enabled/disabled */
|
||||
uint64_t trap; /* Reason for being here */
|
||||
uint64_t dar; /* Fault registers */
|
||||
uint64_t dsisr; /* on 4xx/Book-E used for ESR */
|
||||
uint64_t result; /* Result of a system call */
|
||||
};
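Note: the version field lets the L0 and L1 agree on how much of the structure is meaningful; h_enter_nested() rejects anything newer than HV_GUEST_STATE_VERSION and ignores the version-2 dawr1/dawrx1 fields under TCG. An L1 filling the structure would do roughly (field values are placeholders):

struct kvmppc_hv_guest_state hv_state = {
    .version     = HV_GUEST_STATE_VERSION,   /* currently 2 */
    .lpid        = l2_lpid,
    .lpcr        = l2_lpcr,
    .hdec_expiry = now + l2_hdec_ticks,
    /* ... remaining fields as required ... */
};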
|
||||
|
||||
typedef struct SpaprDeviceTreeUpdateHeader {
|
||||
uint32_t version_id;
|
||||
|
@ -604,6 +675,9 @@ typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
|
|||
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
|
||||
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
|
||||
target_ulong *args);
|
||||
|
||||
void spapr_exit_nested(PowerPCCPU *cpu, int excp);
|
||||
|
||||
target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
target_ulong shift);
|
||||
target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
|
|
|
@ -48,6 +48,11 @@ typedef struct SpaprCpuState {
|
|||
bool prod; /* not migrated, only used to improve dispatch latencies */
|
||||
struct ICPState *icp;
|
||||
struct XiveTCTX *tctx;
|
||||
|
||||
/* Fields for nested-HV support */
|
||||
bool in_nested; /* true while the L2 is executing */
|
||||
CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
|
||||
int64_t nested_tb_offset; /* L1->L2 TB offset */
|
||||
} SpaprCpuState;
|
||||
|
||||
static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)
|
||||
|
|
|
@ -21,5 +21,6 @@ void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt);
|
|||
bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
|
||||
uint64_t size, Error **errp);
|
||||
void spapr_add_nvdimm(DeviceState *dev, uint64_t slot);
|
||||
void spapr_nvdimm_finish_flushes(void);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1311,6 +1311,8 @@ PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc);
|
|||
#ifndef CONFIG_USER_ONLY
|
||||
struct PPCVirtualHypervisorClass {
|
||||
InterfaceClass parent;
|
||||
bool (*cpu_in_nested)(PowerPCCPU *cpu);
|
||||
void (*deliver_hv_excp)(PowerPCCPU *cpu, int excp);
|
||||
void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
|
||||
hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp);
|
||||
const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp,
|
||||
|
@ -1320,7 +1322,8 @@ struct PPCVirtualHypervisorClass {
|
|||
hwaddr ptex, int n);
|
||||
void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
|
||||
void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
|
||||
void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
|
||||
bool (*get_pate)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
|
||||
target_ulong lpid, ppc_v3_pate_t *entry);
|
||||
target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
|
||||
void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
|
||||
void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
|
||||
|
@ -1329,6 +1332,11 @@ struct PPCVirtualHypervisorClass {
|
|||
#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
|
||||
DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass,
|
||||
PPC_VIRTUAL_HYPERVISOR, TYPE_PPC_VIRTUAL_HYPERVISOR)
|
||||
|
||||
static inline bool vhyp_cpu_in_nested(PowerPCCPU *cpu)
|
||||
{
|
||||
return PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp)->cpu_in_nested(cpu);
|
||||
}
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
|
||||
|
@ -2724,4 +2732,43 @@ void dump_mmu(CPUPPCState *env);
|
|||
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
|
||||
void ppc_store_vscr(CPUPPCState *env, uint32_t vscr);
|
||||
uint32_t ppc_get_vscr(CPUPPCState *env);
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Power management enable checks */
|
||||
static inline int check_pow_none(CPUPPCState *env)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int check_pow_nocheck(CPUPPCState *env)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
/* PowerPC implementations definitions */
|
||||
|
||||
#define POWERPC_FAMILY(_name) \
|
||||
static void \
|
||||
glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \
|
||||
\
|
||||
static const TypeInfo \
|
||||
glue(glue(ppc_, _name), _cpu_family_type_info) = { \
|
||||
.name = stringify(_name) "-family-" TYPE_POWERPC_CPU, \
|
||||
.parent = TYPE_POWERPC_CPU, \
|
||||
.abstract = true, \
|
||||
.class_init = glue(glue(ppc_, _name), _cpu_family_class_init), \
|
||||
}; \
|
||||
\
|
||||
static void glue(glue(ppc_, _name), _cpu_family_register_types)(void) \
|
||||
{ \
|
||||
type_register_static( \
|
||||
&glue(glue(ppc_, _name), _cpu_family_type_info)); \
|
||||
} \
|
||||
\
|
||||
type_init(glue(glue(ppc_, _name), _cpu_family_register_types)) \
|
||||
\
|
||||
static void glue(glue(ppc_, _name), _cpu_family_class_init)
|
||||
|
||||
|
||||
#endif /* PPC_CPU_H */
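Note: the POWERPC_FAMILY() macro ends with the class-init function header, so a definition site supplies the body directly; an illustrative use (the family name is hypothetical):

POWERPC_FAMILY(example)(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "example CPU family";
    /* set the PowerPCCPUClass fields for this family here */
}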
|
||||
|
|
File diff suppressed because it is too large
@ -360,12 +360,21 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
|
|||
}
|
||||
#endif
|
||||
|
||||
static void powerpc_set_excp_state(PowerPCCPU *cpu,
|
||||
target_ulong vector, target_ulong msr)
|
||||
static void powerpc_reset_excp_state(PowerPCCPU *cpu)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
/* Reset exception state */
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
}
|
||||
|
||||
static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
|
||||
target_ulong msr)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
assert((msr & env->msr_mask) == msr);
|
||||
|
||||
/*
|
||||
|
@ -376,21 +385,20 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu,
|
|||
* will prevent setting of the HV bit which some exceptions might need
|
||||
* to do.
|
||||
*/
|
||||
env->nip = vector;
|
||||
env->msr = msr;
|
||||
hreg_compute_hflags(env);
|
||||
env->nip = vector;
|
||||
/* Reset exception state */
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
|
||||
/* Reset the reservation */
|
||||
env->reserve_addr = -1;
|
||||
powerpc_reset_excp_state(cpu);
|
||||
|
||||
/*
|
||||
* Any interrupt is context synchronizing, check if TCG TLB needs
|
||||
* a delayed flush on ppc64
|
||||
*/
|
||||
check_tlb_flush(env, false);
|
||||
|
||||
/* Reset the reservation */
|
||||
env->reserve_addr = -1;
|
||||
}
|
||||
|
||||
static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
|
||||
|
@ -471,8 +479,7 @@ static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
|
|||
case POWERPC_EXCP_FP:
|
||||
if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
|
||||
trace_ppc_excp_fp_ignore();
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
powerpc_reset_excp_state(cpu);
|
||||
return;
|
||||
}
|
||||
env->spr[SPR_40x_ESR] = ESR_FP;
|
||||
|
@ -609,8 +616,7 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
|
|||
case POWERPC_EXCP_FP:
|
||||
if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
|
||||
trace_ppc_excp_fp_ignore();
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
powerpc_reset_excp_state(cpu);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -783,8 +789,7 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
|
|||
case POWERPC_EXCP_FP:
|
||||
if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
|
||||
trace_ppc_excp_fp_ignore();
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
powerpc_reset_excp_state(cpu);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -969,8 +974,7 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
|
|||
case POWERPC_EXCP_FP:
|
||||
if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
|
||||
trace_ppc_excp_fp_ignore();
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
powerpc_reset_excp_state(cpu);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1168,8 +1172,7 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
|
|||
case POWERPC_EXCP_FP:
|
||||
if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
|
||||
trace_ppc_excp_fp_ignore();
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
powerpc_reset_excp_state(cpu);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1277,7 +1280,45 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
|
|||
powerpc_set_excp_state(cpu, vector, new_msr);
|
||||
}
|
||||
|
||||
/*
|
||||
* When running a nested HV guest under vhyp, external interrupts are
|
||||
* delivered as HVIRT.
|
||||
*/
|
||||
static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu)
|
||||
{
|
||||
if (cpu->vhyp) {
|
||||
return vhyp_cpu_in_nested(cpu);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef TARGET_PPC64
|
||||
/*
|
||||
* When running under vhyp, hcalls are always intercepted and sent to the
|
||||
* vhc->hypercall handler.
|
||||
*/
|
||||
static bool books_vhyp_handles_hcall(PowerPCCPU *cpu)
|
||||
{
|
||||
if (cpu->vhyp) {
|
||||
return !vhyp_cpu_in_nested(cpu);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * When running a nested KVM HV guest under vhyp, HV exceptions are not
 * delivered to the guest (because there is no concept of HV support), but
 * rather they are sent to the vhyp to exit from the L2 back to the L1 and
 * return from the H_ENTER_NESTED hypercall.
 */
|
||||
static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu)
|
||||
{
|
||||
if (cpu->vhyp) {
|
||||
return vhyp_cpu_in_nested(cpu);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
|
@ -1394,8 +1435,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
|
|||
case POWERPC_EXCP_FP:
|
||||
if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
|
||||
trace_ppc_excp_fp_ignore();
|
||||
cs->exception_index = POWERPC_EXCP_NONE;
|
||||
env->error_code = 0;
|
||||
powerpc_reset_excp_state(cpu);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1439,7 +1479,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
|
|||
env->nip += 4;
|
||||
|
||||
/* "PAPR mode" built-in hypercall emulation */
|
||||
if ((lev == 1) && cpu->vhyp) {
|
||||
if ((lev == 1) && books_vhyp_handles_hcall(cpu)) {
|
||||
PPCVirtualHypervisorClass *vhc =
|
||||
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
|
||||
vhc->hypercall(cpu->vhyp, cpu);
|
||||
|
@ -1527,12 +1567,6 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
|
|||
break;
|
||||
}
|
||||
|
||||
/* Sanity check */
|
||||
if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) {
|
||||
cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
|
||||
"no HV support\n", excp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sort out endianness of interrupt, this differs depending on the
|
||||
* CPU, the HV mode, etc...
|
||||
|
@@ -1551,10 +1585,26 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
         env->spr[srr1] = msr;
     }
 
-    /* This can update new_msr and vector if AIL applies */
-    ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector);
+    if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) {
+        PPCVirtualHypervisorClass *vhc =
+            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+        /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */
+        vhc->deliver_hv_excp(cpu, excp);
 
-    powerpc_set_excp_state(cpu, vector, new_msr);
+        powerpc_reset_excp_state(cpu);
+
+    } else {
+        /* Sanity check */
+        if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) {
+            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
+                      "no HV support\n", excp);
+        }
+
+        /* This can update new_msr and vector if AIL applies */
+        ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector);
+
+        powerpc_set_excp_state(cpu, vector, new_msr);
+    }
 }
 #else
 static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
 
@@ -1674,7 +1724,11 @@ static void ppc_hw_interrupt(CPUPPCState *env)
             /* HEIC blocks delivery to the hypervisor */
             if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
                 (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
-                powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
+                if (books_vhyp_promotes_external_to_hvirt(cpu)) {
+                    powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
+                } else {
+                    powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
+                }
                 return;
             }
         }
 
@@ -1784,6 +1838,8 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
         msr |= (1ULL << MSR_LE);
     }
 
+    /* Anything for nested required here? MSR[HV] bit? */
+
     powerpc_set_excp_state(cpu, vector, msr);
 }
 

@@ -24,6 +24,8 @@
 #include "sysemu/kvm.h"
 #include "helper_regs.h"
 #include "power8-pmu.h"
+#include "cpu-models.h"
+#include "spr_common.h"
 
 /* Swap temporary saved registers with GPRs */
 void hreg_swap_gpr_tgpr(CPUPPCState *env)
@@ -302,3 +304,403 @@ void check_tlb_flush(CPUPPCState *env, bool global)
     }
 }
 #endif
+
+/**
+ * _spr_register
+ *
+ * Register an SPR with all the callbacks required for tcg,
+ * and the ID number for KVM.
+ *
+ * The reason for the conditional compilation is that the tcg functions
+ * may be compiled out, and the system kvm header may not be available
+ * for supplying the ID numbers. This is ugly, but the best we can do.
+ */
+void _spr_register(CPUPPCState *env, int num, const char *name,
+                   USR_ARG(spr_callback *uea_read)
+                   USR_ARG(spr_callback *uea_write)
+                   SYS_ARG(spr_callback *oea_read)
+                   SYS_ARG(spr_callback *oea_write)
+                   SYS_ARG(spr_callback *hea_read)
+                   SYS_ARG(spr_callback *hea_write)
+                   KVM_ARG(uint64_t one_reg_id)
+                   target_ulong initial_value)
+{
+    ppc_spr_t *spr = &env->spr_cb[num];
+
+    /* No SPR should be registered twice. */
+    assert(spr->name == NULL);
+    assert(name != NULL);
+
+    spr->name = name;
+    spr->default_value = initial_value;
+    env->spr[num] = initial_value;
+
+#ifdef CONFIG_TCG
+    spr->uea_read = uea_read;
+    spr->uea_write = uea_write;
+# ifndef CONFIG_USER_ONLY
+    spr->oea_read = oea_read;
+    spr->oea_write = oea_write;
+    spr->hea_read = hea_read;
+    spr->hea_write = hea_write;
+# endif
+#endif
+#ifdef CONFIG_KVM
+    spr->one_reg_id = one_reg_id;
+#endif
+}
+
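For readers skimming the hunk: _spr_register() above is bookkeeping rather than emulation. It fills one slot of the per-CPU spr_cb[] table, refuses double registration, and seeds the live register file with the default value. The stand-alone sketch below models just that bookkeeping; the table size and the SPR numbers are made up for the example and none of the QEMU types are used.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SPRS 1024                      /* made-up table size */

struct spr_info {
    const char *name;                     /* NULL means "not registered yet" */
    uint64_t default_value;
};

static struct spr_info spr_cb[NR_SPRS];   /* descriptor table */
static uint64_t spr[NR_SPRS];             /* live register values */

static void toy_spr_register(int num, const char *name, uint64_t initial_value)
{
    assert(num >= 0 && num < NR_SPRS);
    assert(spr_cb[num].name == NULL);     /* no SPR may be registered twice */
    spr_cb[num].name = name;
    spr_cb[num].default_value = initial_value;
    spr[num] = initial_value;             /* mirror the default into the register */
}

int main(void)
{
    toy_spr_register(26, "SRR0", 0);      /* 26/27 happen to be SRR0/SRR1 on PPC */
    toy_spr_register(27, "SRR1", 0);
    printf("%s = %#llx\n", spr_cb[26].name, (unsigned long long)spr[26]);
    return 0;
}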
+/* Generic PowerPC SPRs */
+void register_generic_sprs(PowerPCCPU *cpu)
+{
+    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+    CPUPPCState *env = &cpu->env;
+
+    /* Integer processing */
+    spr_register(env, SPR_XER, "XER",
+                 &spr_read_xer, &spr_write_xer,
+                 &spr_read_xer, &spr_write_xer,
+                 0x00000000);
+    /* Branch control */
+    spr_register(env, SPR_LR, "LR",
+                 &spr_read_lr, &spr_write_lr,
+                 &spr_read_lr, &spr_write_lr,
+                 0x00000000);
+    spr_register(env, SPR_CTR, "CTR",
+                 &spr_read_ctr, &spr_write_ctr,
+                 &spr_read_ctr, &spr_write_ctr,
+                 0x00000000);
+    /* Interrupt processing */
+    spr_register(env, SPR_SRR0, "SRR0",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+    spr_register(env, SPR_SRR1, "SRR1",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+    /* Processor control */
+    spr_register(env, SPR_SPRG0, "SPRG0",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+    spr_register(env, SPR_SPRG1, "SPRG1",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+    spr_register(env, SPR_SPRG2, "SPRG2",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+    spr_register(env, SPR_SPRG3, "SPRG3",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+
+    spr_register(env, SPR_PVR, "PVR",
+                 /* Linux permits userspace to read PVR */
+#if defined(CONFIG_LINUX_USER)
+                 &spr_read_generic,
+#else
+                 SPR_NOACCESS,
+#endif
+                 SPR_NOACCESS,
+                 &spr_read_generic, SPR_NOACCESS,
+                 pcc->pvr);
+
+    /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */
+    if (pcc->svr != POWERPC_SVR_NONE) {
+        if (pcc->svr & POWERPC_SVR_E500) {
+            spr_register(env, SPR_E500_SVR, "SVR",
+                         SPR_NOACCESS, SPR_NOACCESS,
+                         &spr_read_generic, SPR_NOACCESS,
+                         pcc->svr & ~POWERPC_SVR_E500);
+        } else {
+            spr_register(env, SPR_SVR, "SVR",
+                         SPR_NOACCESS, SPR_NOACCESS,
+                         &spr_read_generic, SPR_NOACCESS,
+                         pcc->svr);
+        }
+    }
+
+    /* Time base */
+    spr_register(env, SPR_VTBL, "TBL",
+                 &spr_read_tbl, SPR_NOACCESS,
+                 &spr_read_tbl, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_TBL, "TBL",
+                 &spr_read_tbl, SPR_NOACCESS,
+                 &spr_read_tbl, &spr_write_tbl,
+                 0x00000000);
+    spr_register(env, SPR_VTBU, "TBU",
+                 &spr_read_tbu, SPR_NOACCESS,
+                 &spr_read_tbu, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_TBU, "TBU",
+                 &spr_read_tbu, SPR_NOACCESS,
+                 &spr_read_tbu, &spr_write_tbu,
+                 0x00000000);
+}
+
+void register_non_embedded_sprs(CPUPPCState *env)
+{
+    /* Exception processing */
+    spr_register_kvm(env, SPR_DSISR, "DSISR",
+                     SPR_NOACCESS, SPR_NOACCESS,
+                     &spr_read_generic, &spr_write_generic,
+                     KVM_REG_PPC_DSISR, 0x00000000);
+    spr_register_kvm(env, SPR_DAR, "DAR",
+                     SPR_NOACCESS, SPR_NOACCESS,
+                     &spr_read_generic, &spr_write_generic,
+                     KVM_REG_PPC_DAR, 0x00000000);
+    /* Timer */
+    spr_register(env, SPR_DECR, "DECR",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_decr, &spr_write_decr,
+                 0x00000000);
+}
+
+/* Storage Description Register 1 */
+void register_sdr1_sprs(CPUPPCState *env)
+{
+#ifndef CONFIG_USER_ONLY
+    if (env->has_hv_mode) {
+        /*
+         * SDR1 is a hypervisor resource on CPUs which have a
+         * hypervisor mode
+         */
+        spr_register_hv(env, SPR_SDR1, "SDR1",
+                        SPR_NOACCESS, SPR_NOACCESS,
+                        SPR_NOACCESS, SPR_NOACCESS,
+                        &spr_read_generic, &spr_write_sdr1,
+                        0x00000000);
+    } else {
+        spr_register(env, SPR_SDR1, "SDR1",
+                     SPR_NOACCESS, SPR_NOACCESS,
+                     &spr_read_generic, &spr_write_sdr1,
+                     0x00000000);
+    }
+#endif
+}
+
+/* BATs 0-3 */
+void register_low_BATs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+    spr_register(env, SPR_IBAT0U, "IBAT0U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatu,
+                 0x00000000);
+    spr_register(env, SPR_IBAT0L, "IBAT0L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatl,
+                 0x00000000);
+    spr_register(env, SPR_IBAT1U, "IBAT1U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatu,
+                 0x00000000);
+    spr_register(env, SPR_IBAT1L, "IBAT1L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatl,
+                 0x00000000);
+    spr_register(env, SPR_IBAT2U, "IBAT2U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatu,
+                 0x00000000);
+    spr_register(env, SPR_IBAT2L, "IBAT2L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatl,
+                 0x00000000);
+    spr_register(env, SPR_IBAT3U, "IBAT3U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatu,
+                 0x00000000);
+    spr_register(env, SPR_IBAT3L, "IBAT3L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat, &spr_write_ibatl,
+                 0x00000000);
+    spr_register(env, SPR_DBAT0U, "DBAT0U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatu,
+                 0x00000000);
+    spr_register(env, SPR_DBAT0L, "DBAT0L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatl,
+                 0x00000000);
+    spr_register(env, SPR_DBAT1U, "DBAT1U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatu,
+                 0x00000000);
+    spr_register(env, SPR_DBAT1L, "DBAT1L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatl,
+                 0x00000000);
+    spr_register(env, SPR_DBAT2U, "DBAT2U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatu,
+                 0x00000000);
+    spr_register(env, SPR_DBAT2L, "DBAT2L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatl,
+                 0x00000000);
+    spr_register(env, SPR_DBAT3U, "DBAT3U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatu,
+                 0x00000000);
+    spr_register(env, SPR_DBAT3L, "DBAT3L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat, &spr_write_dbatl,
+                 0x00000000);
+    env->nb_BATs += 4;
+#endif
+}
+
+/* BATs 4-7 */
+void register_high_BATs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+    spr_register(env, SPR_IBAT4U, "IBAT4U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatu_h,
+                 0x00000000);
+    spr_register(env, SPR_IBAT4L, "IBAT4L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatl_h,
+                 0x00000000);
+    spr_register(env, SPR_IBAT5U, "IBAT5U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatu_h,
+                 0x00000000);
+    spr_register(env, SPR_IBAT5L, "IBAT5L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatl_h,
+                 0x00000000);
+    spr_register(env, SPR_IBAT6U, "IBAT6U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatu_h,
+                 0x00000000);
+    spr_register(env, SPR_IBAT6L, "IBAT6L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatl_h,
+                 0x00000000);
+    spr_register(env, SPR_IBAT7U, "IBAT7U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatu_h,
+                 0x00000000);
+    spr_register(env, SPR_IBAT7L, "IBAT7L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_ibat_h, &spr_write_ibatl_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT4U, "DBAT4U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatu_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT4L, "DBAT4L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatl_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT5U, "DBAT5U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatu_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT5L, "DBAT5L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatl_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT6U, "DBAT6U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatu_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT6L, "DBAT6L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatl_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT7U, "DBAT7U",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatu_h,
+                 0x00000000);
+    spr_register(env, SPR_DBAT7L, "DBAT7L",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_dbat_h, &spr_write_dbatl_h,
+                 0x00000000);
+    env->nb_BATs += 4;
+#endif
+}
+
+/* Software table search registers */
+void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
+    env->nb_tlb = nb_tlbs;
+    env->nb_ways = nb_ways;
+    env->id_tlbs = 1;
+    env->tlb_type = TLB_6XX;
+    spr_register(env, SPR_DMISS, "DMISS",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_DCMP, "DCMP",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_HASH1, "HASH1",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_HASH2, "HASH2",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_IMISS, "IMISS",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_ICMP, "ICMP",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_RPA, "RPA",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_generic, &spr_write_generic,
+                 0x00000000);
+#endif
+}
+
+void register_thrm_sprs(CPUPPCState *env)
+{
+    /* Thermal management */
+    spr_register(env, SPR_THRM1, "THRM1",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_thrm, &spr_write_generic,
+                 0x00000000);
+
+    spr_register(env, SPR_THRM2, "THRM2",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_thrm, &spr_write_generic,
+                 0x00000000);
+
+    spr_register(env, SPR_THRM3, "THRM3",
+                 SPR_NOACCESS, SPR_NOACCESS,
+                 &spr_read_thrm, &spr_write_generic,
+                 0x00000000);
+}
+
+void register_usprgh_sprs(CPUPPCState *env)
+{
+    spr_register(env, SPR_USPRG4, "USPRG4",
+                 &spr_read_ureg, SPR_NOACCESS,
+                 &spr_read_ureg, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_USPRG5, "USPRG5",
+                 &spr_read_ureg, SPR_NOACCESS,
+                 &spr_read_ureg, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_USPRG6, "USPRG6",
+                 &spr_read_ureg, SPR_NOACCESS,
+                 &spr_read_ureg, SPR_NOACCESS,
+                 0x00000000);
+    spr_register(env, SPR_USPRG7, "USPRG7",
+                 &spr_read_ureg, SPR_NOACCESS,
+                 &spr_read_ureg, SPR_NOACCESS,
+                 0x00000000);
+}
 

@@ -354,6 +354,24 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
     return 0;
 }
 
+/*
+ * The spapr vhc has a flat partition scope provided by qemu memory when
+ * not nested.
+ *
+ * When running a nested guest, the addressing is 2-level radix on top of the
+ * vhc memory, so it works practically identically to the bare metal 2-level
+ * radix. So that code is selected directly. A cleaner and more flexible nested
+ * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
+ * function but that is not required for the moment.
+ */
+static bool vhyp_flat_addressing(PowerPCCPU *cpu)
+{
+    if (cpu->vhyp) {
+        return !vhyp_cpu_in_nested(cpu);
+    }
+    return false;
+}
+
 static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                             MMUAccessType access_type,
                                             vaddr eaddr, uint64_t pid,
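The comment above is the key idea on the MMU side: under a plain (non-nested) vhyp the partition scope is flat, so a guest-real address is already a QEMU memory address, while a nested L2 needs one more, partition-scoped translation. The toy below only illustrates that selection; the second-level walk is faked with a constant offset and none of the names are QEMU's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: flat partition scope vs. a (faked) second-level walk. */
static uint64_t toy_partition_scope(uint64_t g_raddr, int vhyp, int nested)
{
    if (vhyp && !nested) {
        return g_raddr;                 /* vhyp_flat_addressing(): use as-is */
    }
    return 0x40000000ULL + g_raddr;     /* pretend partition-scoped radix walk */
}

int main(void)
{
    uint64_t g_raddr = 0x4000;
    printf("L1 under vhyp: %#llx\n",
           (unsigned long long)toy_partition_scope(g_raddr, 1, 0));
    printf("nested L2:     %#llx\n",
           (unsigned long long)toy_partition_scope(g_raddr, 1, 1));
    return 0;
}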
@@ -385,7 +403,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
     }
     prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
 
-    if (cpu->vhyp) {
+    if (vhyp_flat_addressing(cpu)) {
         prtbe0 = ldq_phys(cs->as, prtbe_addr);
     } else {
         /*
@@ -411,7 +429,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
     *g_page_size = PRTBE_R_GET_RTS(prtbe0);
     base_addr = prtbe0 & PRTBE_R_RPDB;
     nls = prtbe0 & PRTBE_R_RPDS;
-    if (msr_hv || cpu->vhyp) {
+    if (msr_hv || vhyp_flat_addressing(cpu)) {
         /*
          * Can treat process table addresses as real addresses
          */
@@ -515,7 +533,7 @@ static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
     relocation = !mmuidx_real(mmu_idx);
 
     /* HV or virtual hypervisor Real Mode Access */
-    if (!relocation && (mmuidx_hv(mmu_idx) || cpu->vhyp)) {
+    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
         /* In real mode top 4 effective addr bits (mostly) ignored */
         *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
 
@@ -552,17 +570,25 @@ static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
     if (cpu->vhyp) {
         PPCVirtualHypervisorClass *vhc;
         vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
-        vhc->get_pate(cpu->vhyp, &pate);
+        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
+            if (guest_visible) {
+                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
+                                      DSISR_R_BADCONFIG);
+            }
+            return false;
+        }
     } else {
         if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
             if (guest_visible) {
-                ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
+                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
+                                      DSISR_R_BADCONFIG);
             }
             return false;
         }
         if (!validate_pate(cpu, lpid, &pate)) {
             if (guest_visible) {
-                ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
+                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
+                                      DSISR_R_BADCONFIG);
             }
             return false;
         }
@@ -592,7 +618,7 @@ static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
         g_raddr = eaddr & R_EADDR_MASK;
     }
 
-    if (cpu->vhyp) {
+    if (vhyp_flat_addressing(cpu)) {
         *raddr = g_raddr;
     } else {
         /*
 

@@ -16,11 +16,67 @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
-#ifndef SPR_TCG_H
-#define SPR_TCG_H
+#ifndef SPR_COMMON_H
+#define SPR_COMMON_H
 
 #define SPR_NOACCESS (&spr_noaccess)
 
+#ifdef CONFIG_TCG
+# define USR_ARG(X) X,
+# ifdef CONFIG_USER_ONLY
+#  define SYS_ARG(X)
+# else
+#  define SYS_ARG(X) X,
+# endif
+#else
+# define USR_ARG(X)
+# define SYS_ARG(X)
+#endif
+#ifdef CONFIG_KVM
+# define KVM_ARG(X) X,
+#else
+# define KVM_ARG(X)
+#endif
+
+typedef void spr_callback(DisasContext *, int, int);
+
+void _spr_register(CPUPPCState *env, int num, const char *name,
+                   USR_ARG(spr_callback *uea_read)
+                   USR_ARG(spr_callback *uea_write)
+                   SYS_ARG(spr_callback *oea_read)
+                   SYS_ARG(spr_callback *oea_write)
+                   SYS_ARG(spr_callback *hea_read)
+                   SYS_ARG(spr_callback *hea_write)
+                   KVM_ARG(uint64_t one_reg_id)
+                   target_ulong initial_value);
+
+/* spr_register_kvm_hv passes all required arguments. */
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write,           \
+                            oea_read, oea_write, hea_read, hea_write,      \
+                            one_reg_id, initial_value)                     \
+    _spr_register(env, num, name,                                          \
+                  USR_ARG(uea_read) USR_ARG(uea_write)                     \
+                  SYS_ARG(oea_read) SYS_ARG(oea_write)                     \
+                  SYS_ARG(hea_read) SYS_ARG(hea_write)                     \
+                  KVM_ARG(one_reg_id) initial_value)
+
+/* spr_register_kvm duplicates the oea callbacks to the hea callbacks. */
+#define spr_register_kvm(env, num, name, uea_read, uea_write,              \
+                         oea_read, oea_write, one_reg_id, ival)            \
+    spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read,     \
+                        oea_write, oea_read, oea_write, one_reg_id, ival)
+
+/* spr_register_hv and spr_register are similar, except there is no kvm id. */
+#define spr_register_hv(env, num, name, uea_read, uea_write,               \
+                        oea_read, oea_write, hea_read, hea_write, ival)    \
+    spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read,     \
+                        oea_write, hea_read, hea_write, 0, ival)
+
+#define spr_register(env, num, name, uea_read, uea_write,                  \
+                     oea_read, oea_write, ival)                            \
+    spr_register_kvm(env, num, name, uea_read, uea_write,                  \
+                     oea_read, oea_write, 0, ival)
+
 /* prototypes for readers and writers for SPRs */
 void spr_noaccess(DisasContext *ctx, int gprn, int sprn);
 void spr_read_generic(DisasContext *ctx, int gprn, int sprn);
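The USR_ARG/SYS_ARG/KVM_ARG macros above make whole arguments disappear from both the prototype and every call site when TCG, system emulation or KVM is compiled out; the trailing comma lives inside the macro so the surviving argument list stays well formed. A self-contained demo of the same trick, with made-up names rather than the QEMU ones:

#include <stdio.h>

/* Toggle to see the extra argument vanish from prototype and call alike. */
#define HAVE_EXTRA 1

#if HAVE_EXTRA
# define EXTRA_ARG(X) X,            /* note the trailing comma */
#else
# define EXTRA_ARG(X)
#endif

static void do_register(const char *name,
                        EXTRA_ARG(int extra)
                        int value)
{
#if HAVE_EXTRA
    printf("%s: extra=%d value=%d\n", name, extra, value);
#else
    printf("%s: value=%d\n", name, value);
#endif
}

/* Wrapper macro mirroring spr_register(): callers always pass everything,
 * the macro decides what survives. */
#define register_thing(name, extra, value) \
    do_register(name, EXTRA_ARG(extra) value)

int main(void)
{
    register_thing("demo", 7, 42);
    return 0;
}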
@@ -141,4 +197,13 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn);
 void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn);
 #endif
 
+void register_low_BATs(CPUPPCState *env);
+void register_high_BATs(CPUPPCState *env);
+void register_sdr1_sprs(CPUPPCState *env);
+void register_thrm_sprs(CPUPPCState *env);
+void register_usprgh_sprs(CPUPPCState *env);
+void register_non_embedded_sprs(CPUPPCState *env);
+void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways);
+void register_generic_sprs(PowerPCCPU *cpu);
+
 #endif
 

@@ -35,7 +35,7 @@
 #include "exec/translator.h"
 #include "exec/log.h"
 #include "qemu/atomic128.h"
-#include "spr_tcg.h"
+#include "spr_common.h"
 
 #include "qemu/qemu-print.h"
 #include "qapi/error.h"