ppc patch queue 2019-04-26

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.1-20190426' into staging

Here's the first ppc target pull request for qemu-4.1. This has a number
of things that have accumulated while qemu-4.0 was frozen.

* A number of emulated MMU improvements from Ben Herrenschmidt

* Assorted cleanups from Greg Kurz

* A large set of mostly mechanical cleanups from me to bring target/ppc
  much closer to compliance with the modern coding style

* Support for passthrough of NVIDIA GPUs using NVLink2

As well as some other assorted fixes.

# gpg: Signature made Fri 26 Apr 2019 07:02:19 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.1-20190426: (36 commits)
  target/ppc: improve performance of large BAT invalidations
  ppc/hash32: Rework R and C bit updates
  ppc/hash64: Rework R and C bit updates
  ppc/spapr: Use proper HPTE accessors for H_READ
  target/ppc: Don't check UPRT in radix mode when in HV real mode
  target/ppc/kvm: Convert DPRINTF to traces
  target/ppc/trace-events: Fix trivial typo
  spapr: Drop duplicate PCI swizzle code
  spapr_pci: Get rid of duplicate code for node name creation
  target/ppc: Style fixes for translate/spe-impl.inc.c
  target/ppc: Style fixes for translate/vmx-impl.inc.c
  target/ppc: Style fixes for translate/vsx-impl.inc.c
  target/ppc: Style fixes for translate/fp-impl.inc.c
  target/ppc: Style fixes for translate.c
  target/ppc: Style fixes for translate_init.inc.c
  target/ppc: Style fixes for monitor.c
  target/ppc: Style fixes for mmu_helper.c
  target/ppc: Style fixes for mmu-hash64.[ch]
  target/ppc: Style fixes for mmu-hash32.[ch]
  target/ppc: Style fixes for misc_helper.c
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 9ec34ecc97
@@ -1556,7 +1556,7 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev,
  */
 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
 {
-    return (pin + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS;
+    return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
 }
 
 /***********************************************************/
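For readers outside the QEMU tree: pci_swizzle() is the inline helper this hunk starts delegating to (it is added to include/hw/pci/pci.h further down in this series). A minimal standalone sketch of the same arithmetic; apart from the (slot + pin) % 4 rule and PCI_NUM_PINS == 4, everything here is illustrative:

#include <assert.h>

#define PCI_NUM_PINS 4 /* INTA..INTD */

/* Standard PCI IRQ swizzle: slot S raising pin P arrives as (S + P) mod 4 */
static int swizzle(int slot, int pin)
{
    return (slot + pin) % PCI_NUM_PINS;
}

int main(void)
{
    assert(swizzle(0, 0) == 0); /* slot 0, INTA stays INTA */
    assert(swizzle(5, 1) == 2); /* slot 5, INTB arrives as INTC */
    assert(swizzle(3, 2) == 1); /* slot 3, INTC arrives as INTB */
    return 0;
}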
@@ -9,7 +9,7 @@ obj-$(CONFIG_SPAPR_RNG) += spapr_rng.o
 # IBM PowerNV
 obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o
 ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
-obj-y += spapr_pci_vfio.o
+obj-y += spapr_pci_vfio.o spapr_pci_nvlink2.o
 endif
 obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o
 # PowerPC 4xx boards
@@ -40,7 +40,6 @@
 #include "hw/ide.h"
 #include "hw/loader.h"
 #include "hw/timer/mc146818rtc.h"
-#include "hw/input/i8042.h"
 #include "hw/isa/pc87312.h"
 #include "hw/net/ne2000-isa.h"
 #include "sysemu/arch_init.h"
@@ -1034,12 +1034,13 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
         0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
         cpu_to_be32(max_cpus / smp_threads),
     };
+    uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 1 : 0);
     uint32_t maxdomains[] = {
         cpu_to_be32(4),
-        cpu_to_be32(0),
-        cpu_to_be32(0),
-        cpu_to_be32(0),
-        cpu_to_be32(nb_numa_nodes ? nb_numa_nodes : 1),
+        maxdomain,
+        maxdomain,
+        maxdomain,
+        cpu_to_be32(spapr->gpu_numa_id),
     };
 
     _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
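The maxdomains array above becomes the "ibm,max-associativity-domains" RTAS property: cell 0 is the number of associativity levels (4) and the remaining cells cap the domain count per level. A worked example under an assumed configuration of two NUMA nodes plus one NVLink2 GPU — gpu_numa_id starts at MAX(1, nb_numa_nodes) = 2 at machine reset (see the spapr_machine_reset hunk below) and the vPHB reset handler bumps it once per GPU, giving 3:

#include <stdint.h>
#include <stdio.h>

/* stand-in for QEMU's cpu_to_be32; illustrative, little-endian host assumed */
static uint32_t cpu_to_be32(uint32_t v) { return __builtin_bswap32(v); }

int main(void)
{
    unsigned gpu_numa_id = 3; /* 2 NUMA nodes + 1 GPU (assumed config) */
    uint32_t maxdomain = cpu_to_be32(gpu_numa_id > 1 ? 1 : 0);
    uint32_t maxdomains[] = {
        cpu_to_be32(4),              /* number of associativity levels */
        maxdomain, maxdomain, maxdomain,
        cpu_to_be32(gpu_numa_id),    /* max domains at the node level */
    };
    for (int i = 0; i < 5; i++) {
        printf("cell %d = %u\n", i, __builtin_bswap32(maxdomains[i]));
    }
    return 0; /* prints 4, 1, 1, 1, 3 */
}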
@@ -1519,10 +1520,10 @@ static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
     /* Nothing to do for qemu managed HPT */
 }
 
-static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
-                             uint64_t pte0, uint64_t pte1)
+void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
+                      uint64_t pte0, uint64_t pte1)
 {
-    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
+    SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
     hwaddr offset = ptex * HASH_PTE_SIZE_64;
 
     if (!spapr->htab) {
@@ -1550,6 +1551,38 @@ static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
     }
 }
 
+static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
+                             uint64_t pte1)
+{
+    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15;
+    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+    if (!spapr->htab) {
+        /* There should always be a hash table when this is called */
+        error_report("spapr_hpte_set_c called with no hash table !");
+        return;
+    }
+
+    /* The HW performs a non-atomic byte update */
+    stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
+}
+
+static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
+                             uint64_t pte1)
+{
+    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14;
+    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+    if (!spapr->htab) {
+        /* There should always be a hash table when this is called */
+        error_report("spapr_hpte_set_r called with no hash table !");
+        return;
+    }
+
+    /* The HW performs a non-atomic byte update */
+    stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
+}
+
 int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
 {
     int shift;
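The byte offsets in the new helpers follow from the hashed-PTE layout: each entry is 16 bytes (HASH_PTE_SIZE_64), stored big-endian, with pte1 occupying bytes 8-15. The C (changed) bit is 0x80 of pte1, i.e. in byte 15, and the R (referenced) bit is 0x100, i.e. bit 0x01 of byte 14; a single-byte store mirrors the non-atomic byte update the hardware performs. A self-contained sketch of the same arithmetic (the HPTE64_R_R/HPTE64_R_C constants match QEMU's mmu-hash64 definitions; the rest is illustrative):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define HASH_PTE_SIZE_64 16
#define HPTE64_R_R 0x0000000000000100ULL /* referenced */
#define HPTE64_R_C 0x0000000000000080ULL /* changed */

/* Set the C bit of entry ptex with a single byte store, like the HW */
static void hpte_set_c(uint8_t *htab, uint64_t ptex, uint64_t pte1)
{
    /* byte 15 of the big-endian entry holds the low 8 bits of pte1 */
    htab[ptex * HASH_PTE_SIZE_64 + 15] = (pte1 & 0xff) | 0x80;
}

/* Set the R bit: byte 14 holds bits 8..15 of big-endian pte1 */
static void hpte_set_r(uint8_t *htab, uint64_t ptex, uint64_t pte1)
{
    htab[ptex * HASH_PTE_SIZE_64 + 14] = ((pte1 >> 8) & 0xff) | 0x01;
}

int main(void)
{
    uint8_t htab[2 * HASH_PTE_SIZE_64];
    memset(htab, 0, sizeof(htab));
    hpte_set_c(htab, 1, 0);
    hpte_set_r(htab, 1, 0);
    assert(htab[31] == (HPTE64_R_C & 0xff));        /* byte 15 of entry 1 */
    assert(htab[30] == ((HPTE64_R_R >> 8) & 0xff)); /* byte 14 of entry 1 */
    return 0;
}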
@@ -1698,6 +1731,16 @@ static void spapr_machine_reset(void)
         spapr_irq_msi_reset(spapr);
     }
 
+    /*
+     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
+     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
+     * called from vPHB reset handler so we initialize the counter here.
+     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
+     * must be equally distant from any other node.
+     * The final value of spapr->gpu_numa_id is going to be written to
+     * max-associativity-domains in spapr_build_fdt().
+     */
+    spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
     qemu_devices_reset();
 
     /*
@@ -3907,7 +3950,9 @@ static void spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
     smc->phb_placement(spapr, sphb->index,
                        &sphb->buid, &sphb->io_win_addr,
                        &sphb->mem_win_addr, &sphb->mem64_win_addr,
-                       windows_supported, sphb->dma_liobn, errp);
+                       windows_supported, sphb->dma_liobn,
+                       &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr,
+                       errp);
 }
 
 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
@@ -4108,7 +4153,8 @@ static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
 static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
                                 uint64_t *buid, hwaddr *pio,
                                 hwaddr *mmio32, hwaddr *mmio64,
-                                unsigned n_dma, uint32_t *liobns, Error **errp)
+                                unsigned n_dma, uint32_t *liobns,
+                                hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
 {
     /*
      * New-style PHB window placement.
@@ -4153,6 +4199,9 @@ static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
     *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
     *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
+
+    *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
+    *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
 }
 
 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
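For orientation, the GPU RAM and ATSD windows are laid out linearly per vPHB index. A small standalone calculation using the constants added to include/hw/pci-host/spapr.h later in this diff; the 64 TiB base is an assumption matching that header's comment that GPU RAM starts at 64 TiB (SPAPR_PCI_LIMIT):

#include <stdint.h>
#include <stdio.h>

#define KiB (1ULL << 10)
#define TiB (1ULL << 40)
/* assumed: SPAPR_PCI_LIMIT == 64 TiB, per the header comment */
#define SPAPR_PCI_NV2RAM64_WIN_BASE (64 * TiB)
#define SPAPR_PCI_NV2RAM64_WIN_SIZE (2 * TiB)
#define SPAPR_PCI_NV2ATSD_WIN_BASE  (128 * TiB)
#define SPAPR_PCI_NV2ATSD_WIN_SIZE  (6 * 3 * 64 * KiB)

int main(void)
{
    for (uint32_t index = 0; index < 3; index++) {
        uint64_t nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE +
                          index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
        uint64_t nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE +
                           index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
        printf("vPHB %u: gpa window 0x%llx, atsd window 0x%llx\n", index,
               (unsigned long long)nv2gpa, (unsigned long long)nv2atsd);
    }
    return 0;
}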
@@ -4274,7 +4323,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     vhc->hpt_mask = spapr_hpt_mask;
     vhc->map_hptes = spapr_map_hptes;
     vhc->unmap_hptes = spapr_unmap_hptes;
-    vhc->store_hpte = spapr_store_hpte;
+    vhc->hpte_set_c = spapr_hpte_set_c;
+    vhc->hpte_set_r = spapr_hpte_set_r;
     vhc->get_pate = spapr_get_pate;
     vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
     xic->ics_get = spapr_ics_get;
@@ -4368,6 +4418,18 @@ DEFINE_SPAPR_MACHINE(4_0, "4.0", false);
 /*
  * pseries-3.1
  */
+static void phb_placement_3_1(SpaprMachineState *spapr, uint32_t index,
+                              uint64_t *buid, hwaddr *pio,
+                              hwaddr *mmio32, hwaddr *mmio64,
+                              unsigned n_dma, uint32_t *liobns,
+                              hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
+{
+    spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns,
+                        nv2gpa, nv2atsd, errp);
+    *nv2gpa = 0;
+    *nv2atsd = 0;
+}
+
 static void spapr_machine_3_1_class_options(MachineClass *mc)
 {
     SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
@@ -4383,6 +4445,7 @@ static void spapr_machine_3_1_class_options(MachineClass *mc)
     smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
     smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
     smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
+    smc->phb_placement = phb_placement_3_1;
 }
 
 DEFINE_SPAPR_MACHINE(3_1, "3.1", false);
@@ -4514,7 +4577,8 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
 static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
                               uint64_t *buid, hwaddr *pio,
                               hwaddr *mmio32, hwaddr *mmio64,
-                              unsigned n_dma, uint32_t *liobns, Error **errp)
+                              unsigned n_dma, uint32_t *liobns,
+                              hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
 {
     /* Legacy PHB placement for pseries-2.7 and earlier machine types */
     const uint64_t base_buid = 0x800000020000000ULL;
@@ -4558,6 +4622,9 @@ static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
      * fallback behaviour of automatically splitting a large "32-bit"
      * window into contiguous 32-bit and 64-bit windows
      */
+
+    *nv2gpa = 0;
+    *nv2atsd = 0;
 }
 
 static void spapr_machine_2_7_class_options(MachineClass *mc)
@@ -118,7 +118,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
         ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
     }
 
-    ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);
+    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);
 
     args[0] = ptex + slot;
     return H_SUCCESS;
@@ -131,7 +131,8 @@ typedef enum {
     REMOVE_HW = 3,
 } RemoveResult;
 
-static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
+static RemoveResult remove_hpte(PowerPCCPU *cpu
+                                , target_ulong ptex,
                                 target_ulong avpn,
                                 target_ulong flags,
                                 target_ulong *vp, target_ulong *rp)
@@ -155,7 +156,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
     }
     *vp = v;
     *rp = r;
-    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
+    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
     ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
     return REMOVE_SUCCESS;
 }
@@ -289,13 +290,13 @@ static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
     r |= (flags << 55) & HPTE64_R_PP0;
     r |= (flags << 48) & HPTE64_R_KEY_HI;
     r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
-    ppc_hash64_store_hpte(cpu, ptex,
-                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
+    spapr_store_hpte(cpu, ptex,
+                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
     ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
     /* Flush the tlb */
     check_tlb_flush(env, true);
     /* Don't need a memory barrier, due to qemu's global lock */
-    ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
+    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
     return H_SUCCESS;
 }
@@ -304,8 +305,8 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
 {
     target_ulong flags = args[0];
     target_ulong ptex = args[1];
-    uint8_t *hpte;
     int i, ridx, n_entries = 1;
+    const ppc_hash_pte64_t *hptes;
 
     if (!valid_ptex(cpu, ptex)) {
         return H_PARAMETER;
@@ -317,13 +318,12 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
         n_entries = 4;
     }
 
-    hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64);
+    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
 
     for (i = 0, ridx = 0; i < n_entries; i++) {
-        args[ridx++] = ldq_p(hpte);
-        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
-        hpte += HASH_PTE_SIZE_64;
+        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
+        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
     }
+    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);
 
     return H_SUCCESS;
 }
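The rework above stops assuming the HPT lives in a directly addressable spapr->htab buffer and instead goes through the hash-64 accessors, so a KVM-managed (or mid-resize) table takes the same path. A rough standalone sketch of the map/read/unmap pattern — the types and helpers here are mocks; QEMU's real ppc_hash64_map_hptes/ppc_hash64_hpte0/1 live in target/ppc/mmu-hash64.[ch] and differ in detail:

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t pte0; /* valid bit, AVPN ... (big-endian in the real table) */
    uint64_t pte1; /* RPN, R/C, protection ... */
} hash_pte64;

/* mock: hand out a view of n entries starting at ptex */
static const hash_pte64 *map_hptes(const hash_pte64 *htab, uint64_t ptex,
                                   int n) { (void)n; return htab + ptex; }
static void unmap_hptes(const hash_pte64 *hptes, uint64_t ptex, int n)
{ (void)hptes; (void)ptex; (void)n; /* real code releases the mapping here */ }

int main(void)
{
    hash_pte64 htab[8] = { [4] = { 0x11, 0x22 }, [5] = { 0x33, 0x44 } };
    uint64_t args[8];
    int i, ridx = 0, n_entries = 2;

    const hash_pte64 *hptes = map_hptes(htab, 4, n_entries);
    for (i = 0; i < n_entries; i++) {
        args[ridx++] = hptes[i].pte0;
        args[ridx++] = hptes[i].pte1;
    }
    unmap_hptes(hptes, 4, n_entries);
    printf("0x%llx 0x%llx\n", (unsigned long long)args[0],
           (unsigned long long)args[1]); /* 0x11 0x22 */
    return 0;
}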
@@ -67,36 +67,11 @@ void spapr_irq_msi_reset(SpaprMachineState *spapr)
  * XICS IRQ backend.
  */
 
-static ICSState *spapr_ics_create(SpaprMachineState *spapr,
-                                  int nr_irqs, Error **errp)
-{
-    Error *local_err = NULL;
-    Object *obj;
-
-    obj = object_new(TYPE_ICS_SIMPLE);
-    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
-    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
-                                   &error_abort);
-    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
-    if (local_err) {
-        goto error;
-    }
-    object_property_set_bool(obj, true, "realized", &local_err);
-    if (local_err) {
-        goto error;
-    }
-
-    return ICS_BASE(obj);
-
-error:
-    error_propagate(errp, local_err);
-    return NULL;
-}
-
 static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
                                 Error **errp)
 {
     MachineState *machine = MACHINE(spapr);
+    Object *obj;
     Error *local_err = NULL;
     bool xics_kvm = false;
 
@@ -108,7 +83,8 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
     if (machine_kernel_irqchip_required(machine) && !xics_kvm) {
         error_prepend(&local_err,
                       "kernel_irqchip requested but unavailable: ");
-        goto error;
+        error_propagate(errp, local_err);
+        return;
     }
     error_free(local_err);
     local_err = NULL;
@@ -118,10 +94,18 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
         xics_spapr_init(spapr);
     }
 
-    spapr->ics = spapr_ics_create(spapr, nr_irqs, &local_err);
-
-error:
-    error_propagate(errp, local_err);
+    obj = object_new(TYPE_ICS_SIMPLE);
+    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
+    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
+                                   &error_fatal);
+    object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal);
+    object_property_set_bool(obj, true, "realized", &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    spapr->ics = ICS_BASE(obj);
 }
 
 #define ICS_IRQ_FREE(ics, srcno) \
@@ -719,26 +719,10 @@ param_error_exit:
     rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
 }
 
-static int pci_spapr_swizzle(int slot, int pin)
-{
-    return (slot + pin) % PCI_NUM_PINS;
-}
-
-static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
-{
-    /*
-     * Here we need to convert pci_dev + irq_num to some unique value
-     * which is less than number of IRQs on the specific bus (4). We
-     * use standard PCI swizzling, that is (slot number + pin number)
-     * % 4.
-     */
-    return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
-}
-
 static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
 {
     /*
-     * Here we use the number returned by pci_spapr_map_irq to find a
+     * Here we use the number returned by pci_swizzle_map_irq_fn to find a
      * corresponding qemu_irq.
      */
     SpaprPhbState *phb = opaque;
@@ -1355,6 +1339,8 @@ static void spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
     if (sphb->pcie_ecs && pci_is_express(dev)) {
         _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1));
     }
+
+    spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb);
 }
 
 /* create OF node for pci device and required OF DT properties */
@@ -1587,6 +1573,8 @@ static void spapr_phb_unrealize(DeviceState *dev, Error **errp)
     int i;
     const unsigned windows_supported = spapr_phb_windows_supported(sphb);
 
+    spapr_phb_nvgpu_free(sphb);
+
     if (sphb->msi) {
         g_hash_table_unref(sphb->msi);
         sphb->msi = NULL;
@@ -1762,7 +1750,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
                              &sphb->iowindow);
 
     bus = pci_register_root_bus(dev, NULL,
-                                pci_spapr_set_irq, pci_spapr_map_irq, sphb,
+                                pci_spapr_set_irq, pci_swizzle_map_irq_fn, sphb,
                                 &sphb->memspace, &sphb->iospace,
                                 PCI_DEVFN(0, 0), PCI_NUM_PINS,
                                 TYPE_SPAPR_PHB_ROOT_BUS);
@@ -1898,8 +1886,14 @@ void spapr_phb_dma_reset(SpaprPhbState *sphb)
 static void spapr_phb_reset(DeviceState *qdev)
 {
     SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev);
+    Error *errp = NULL;
 
     spapr_phb_dma_reset(sphb);
+    spapr_phb_nvgpu_free(sphb);
+    spapr_phb_nvgpu_setup(sphb, &errp);
+    if (errp) {
+        error_report_err(errp);
+    }
 
     /* Reset the IOMMU state */
     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
@@ -1932,6 +1926,8 @@ static Property spapr_phb_properties[] = {
                      pre_2_8_migration, false),
     DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState,
                      pcie_ecs, true),
+    DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0),
+    DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -2164,7 +2160,6 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
                           uint32_t nr_msis, int *node_offset)
 {
     int bus_off, i, j, ret;
-    gchar *nodename;
     uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
     struct {
         uint32_t hi;
@@ -2212,11 +2207,10 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
     SpaprFdt s_fdt;
     SpaprDrc *drc;
+    Error *errp = NULL;
 
     /* Start populating the FDT */
-    nodename = g_strdup_printf("pci@%" PRIx64, phb->buid);
-    _FDT(bus_off = fdt_add_subnode(fdt, 0, nodename));
-    g_free(nodename);
+    _FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname));
     if (node_offset) {
         *node_offset = bus_off;
     }
@@ -2249,14 +2243,14 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
     }
 
     /* Build the interrupt-map, this must matches what is done
-     * in pci_spapr_map_irq
+     * in pci_swizzle_map_irq_fn
     */
     _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
                      &interrupt_map_mask, sizeof(interrupt_map_mask)));
     for (i = 0; i < PCI_SLOT_MAX; i++) {
         for (j = 0; j < PCI_NUM_PINS; j++) {
             uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
-            int lsi_num = pci_spapr_swizzle(i, j);
+            int lsi_num = pci_swizzle(i, j);
 
             irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
             irqmap[1] = 0;
@@ -2304,6 +2298,12 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
         return ret;
     }
 
+    spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &errp);
+    if (errp) {
+        error_report_err(errp);
+    }
+    spapr_phb_nvgpu_ram_populate_dt(phb, fdt);
+
     return 0;
 }
 
@@ -0,0 +1,450 @@
+/*
+ * QEMU sPAPR PCI for NVLink2 pass through
+ *
+ * Copyright (c) 2019 Alexey Kardashevskiy, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "hw/pci/pci.h"
+#include "hw/pci-host/spapr.h"
+#include "qemu/error-report.h"
+#include "hw/ppc/fdt.h"
+#include "hw/pci/pci_bridge.h"
+
+#define PHANDLE_PCIDEV(phb, pdev)    (0x12000000 | \
+                                     (((phb)->index) << 16) | ((pdev)->devfn))
+#define PHANDLE_GPURAM(phb, n)       (0x110000FF | ((n) << 8) | \
+                                     (((phb)->index) << 16))
+#define PHANDLE_NVLINK(phb, gn, nn)  (0x00130000 | (((phb)->index) << 8) | \
+                                     ((gn) << 4) | (nn))
+
+#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
+
+struct spapr_phb_pci_nvgpu_config {
+    uint64_t nv2_ram_current;
+    uint64_t nv2_atsd_current;
+    int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */
+    struct spapr_phb_pci_nvgpu_slot {
+        uint64_t tgt;
+        uint64_t gpa;
+        unsigned numa_id;
+        PCIDevice *gpdev;
+        int linknum;
+        struct {
+            uint64_t atsd_gpa;
+            PCIDevice *npdev;
+            uint32_t link_speed;
+        } links[NVGPU_MAX_LINKS];
+    } slots[NVGPU_MAX_NUM];
+    Error *errp;
+};
+
+static struct spapr_phb_pci_nvgpu_slot *
+spapr_nvgpu_get_slot(struct spapr_phb_pci_nvgpu_config *nvgpus, uint64_t tgt)
+{
+    int i;
+
+    /* Search for partially collected "slot" */
+    for (i = 0; i < nvgpus->num; ++i) {
+        if (nvgpus->slots[i].tgt == tgt) {
+            return &nvgpus->slots[i];
+        }
+    }
+
+    if (nvgpus->num == ARRAY_SIZE(nvgpus->slots)) {
+        return NULL;
+    }
+
+    i = nvgpus->num;
+    nvgpus->slots[i].tgt = tgt;
+    ++nvgpus->num;
+
+    return &nvgpus->slots[i];
+}
+
+static void spapr_pci_collect_nvgpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
+                                    PCIDevice *pdev, uint64_t tgt,
+                                    MemoryRegion *mr, Error **errp)
+{
+    MachineState *machine = MACHINE(qdev_get_machine());
+    SpaprMachineState *spapr = SPAPR_MACHINE(machine);
+    struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
+
+    if (!nvslot) {
+        error_setg(errp, "Found too many GPUs per vPHB");
+        return;
+    }
+    g_assert(!nvslot->gpdev);
+    nvslot->gpdev = pdev;
+
+    nvslot->gpa = nvgpus->nv2_ram_current;
+    nvgpus->nv2_ram_current += memory_region_size(mr);
+    nvslot->numa_id = spapr->gpu_numa_id;
+    ++spapr->gpu_numa_id;
+}
+
+static void spapr_pci_collect_nvnpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
+                                    PCIDevice *pdev, uint64_t tgt,
+                                    MemoryRegion *mr, Error **errp)
+{
+    struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
+    int j;
+
+    if (!nvslot) {
+        error_setg(errp, "Found too many NVLink bridges per vPHB");
+        return;
+    }
+
+    j = nvslot->linknum;
+    if (j == ARRAY_SIZE(nvslot->links)) {
+        error_setg(errp, "Found too many NVLink bridges per GPU");
+        return;
+    }
+    ++nvslot->linknum;
+
+    g_assert(!nvslot->links[j].npdev);
+    nvslot->links[j].npdev = pdev;
+    nvslot->links[j].atsd_gpa = nvgpus->nv2_atsd_current;
+    nvgpus->nv2_atsd_current += memory_region_size(mr);
+    nvslot->links[j].link_speed =
+        object_property_get_uint(OBJECT(pdev), "nvlink2-link-speed", NULL);
+}
+
+static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev,
+                                        void *opaque)
+{
+    PCIBus *sec_bus;
+    Object *po = OBJECT(pdev);
+    uint64_t tgt = object_property_get_uint(po, "nvlink2-tgt", NULL);
+
+    if (tgt) {
+        Error *local_err = NULL;
+        struct spapr_phb_pci_nvgpu_config *nvgpus = opaque;
+        Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL);
+        Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]",
+                                                  NULL);
+
+        g_assert(mr_gpu || mr_npu);
+        if (mr_gpu) {
+            spapr_pci_collect_nvgpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_gpu),
+                                    &local_err);
+        } else {
+            spapr_pci_collect_nvnpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_npu),
+                                    &local_err);
+        }
+        error_propagate(&nvgpus->errp, local_err);
+    }
+    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
+         PCI_HEADER_TYPE_BRIDGE)) {
+        return;
+    }
+
+    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
+    if (!sec_bus) {
+        return;
+    }
+
+    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
+                        spapr_phb_pci_collect_nvgpu, opaque);
+}
+
+void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
+{
+    int i, j, valid_gpu_num;
+    PCIBus *bus;
+
+    /* Search for GPUs and NPUs */
+    if (!sphb->nv2_gpa_win_addr || !sphb->nv2_atsd_win_addr) {
+        return;
+    }
+
+    sphb->nvgpus = g_new0(struct spapr_phb_pci_nvgpu_config, 1);
+    sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr;
+    sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr;
+
+    bus = PCI_HOST_BRIDGE(sphb)->bus;
+    pci_for_each_device(bus, pci_bus_num(bus),
+                        spapr_phb_pci_collect_nvgpu, sphb->nvgpus);
+
+    if (sphb->nvgpus->errp) {
+        error_propagate(errp, sphb->nvgpus->errp);
+        sphb->nvgpus->errp = NULL;
+        goto cleanup_exit;
+    }
+
+    /* Add found GPU RAM and ATSD MRs if found */
+    for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) {
+        Object *nvmrobj;
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+
+        if (!nvslot->gpdev) {
+            continue;
+        }
+        nvmrobj = object_property_get_link(OBJECT(nvslot->gpdev),
+                                           "nvlink2-mr[0]", NULL);
+        /* ATSD is pointless without GPU RAM MR so skip those */
+        if (!nvmrobj) {
+            continue;
+        }
+
+        ++valid_gpu_num;
+        memory_region_add_subregion(get_system_memory(), nvslot->gpa,
+                                    MEMORY_REGION(nvmrobj));
+
+        for (j = 0; j < nvslot->linknum; ++j) {
+            Object *atsdmrobj;
+
+            atsdmrobj = object_property_get_link(OBJECT(nvslot->links[j].npdev),
+                                                 "nvlink2-atsd-mr[0]", NULL);
+            if (!atsdmrobj) {
+                continue;
+            }
+            memory_region_add_subregion(get_system_memory(),
+                                        nvslot->links[j].atsd_gpa,
+                                        MEMORY_REGION(atsdmrobj));
+        }
+    }
+
+    if (valid_gpu_num) {
+        return;
+    }
+    /* We did not find any interesting GPU */
+cleanup_exit:
+    g_free(sphb->nvgpus);
+    sphb->nvgpus = NULL;
+}
+
+void spapr_phb_nvgpu_free(SpaprPhbState *sphb)
+{
+    int i, j;
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    for (i = 0; i < sphb->nvgpus->num; ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
+                                                    "nvlink2-mr[0]", NULL);
+
+        if (nv_mrobj) {
+            memory_region_del_subregion(get_system_memory(),
+                                        MEMORY_REGION(nv_mrobj));
+        }
+        for (j = 0; j < nvslot->linknum; ++j) {
+            PCIDevice *npdev = nvslot->links[j].npdev;
+            Object *atsd_mrobj;
+            atsd_mrobj = object_property_get_link(OBJECT(npdev),
+                                                  "nvlink2-atsd-mr[0]", NULL);
+            if (atsd_mrobj) {
+                memory_region_del_subregion(get_system_memory(),
+                                            MEMORY_REGION(atsd_mrobj));
+            }
+        }
+    }
+    g_free(sphb->nvgpus);
+    sphb->nvgpus = NULL;
+}
+
+void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
+                                 Error **errp)
+{
+    int i, j, atsdnum = 0;
+    uint64_t atsd[8]; /* The existing limitation of known guests */
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+
+        if (!nvslot->gpdev) {
+            continue;
+        }
+        for (j = 0; j < nvslot->linknum; ++j) {
+            if (!nvslot->links[j].atsd_gpa) {
+                continue;
+            }
+
+            if (atsdnum == ARRAY_SIZE(atsd)) {
+                error_report("Only %"PRIuPTR" ATSD registers supported",
+                             ARRAY_SIZE(atsd));
+                break;
+            }
+            atsd[atsdnum] = cpu_to_be64(nvslot->links[j].atsd_gpa);
+            ++atsdnum;
+        }
+    }
+
+    if (!atsdnum) {
+        error_setg(errp, "No ATSD registers found");
+        return;
+    }
+
+    if (!spapr_phb_eeh_available(sphb)) {
+        /*
+         * ibm,mmio-atsd contains ATSD registers; these belong to an NPU PHB
+         * which we do not emulate as a separate device. Instead we put
+         * ibm,mmio-atsd to the vPHB with GPU and make sure that we do not
+         * put GPUs from different IOMMU groups to the same vPHB to ensure
+         * that the guest will use ATSDs from the corresponding NPU.
+         */
+        error_setg(errp, "ATSD requires separate vPHB per GPU IOMMU group");
+        return;
+    }
+
+    _FDT((fdt_setprop(fdt, bus_off, "ibm,mmio-atsd", atsd,
+                      atsdnum * sizeof(atsd[0]))));
+}
+
+void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
+{
+    int i, j, linkidx, npuoff;
+    char *npuname;
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    npuname = g_strdup_printf("npuphb%d", sphb->index);
+    npuoff = fdt_add_subnode(fdt, 0, npuname);
+    _FDT(npuoff);
+    _FDT(fdt_setprop_cell(fdt, npuoff, "#address-cells", 1));
+    _FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0));
+    /* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */
+    _FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu")));
+    g_free(npuname);
+
+    for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) {
+        for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) {
+            char *linkname = g_strdup_printf("link@%d", linkidx);
+            int off = fdt_add_subnode(fdt, npuoff, linkname);
+
+            _FDT(off);
+            /* _FDT((fdt_setprop_cell(fdt, off, "reg", linkidx))); */
+            _FDT((fdt_setprop_string(fdt, off, "compatible",
+                                     "ibm,npu-link")));
+            _FDT((fdt_setprop_cell(fdt, off, "phandle",
+                                   PHANDLE_NVLINK(sphb, i, j))));
+            _FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx)));
+            g_free(linkname);
+            ++linkidx;
+        }
+    }
+
+    /* Add memory nodes for GPU RAM and mark them unusable */
+    for (i = 0; i < sphb->nvgpus->num; ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
+                                                    "nvlink2-mr[0]", NULL);
+        uint32_t associativity[] = {
+            cpu_to_be32(0x4),
+            SPAPR_GPU_NUMA_ID,
+            SPAPR_GPU_NUMA_ID,
+            SPAPR_GPU_NUMA_ID,
+            cpu_to_be32(nvslot->numa_id)
+        };
+        uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
+        uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
+        char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
+        int off = fdt_add_subnode(fdt, 0, mem_name);
+
+        _FDT(off);
+        _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
+        _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
+        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
+                          sizeof(associativity))));
+
+        _FDT((fdt_setprop_string(fdt, off, "compatible",
+                                 "ibm,coherent-device-memory")));
+
+        mem_reg[1] = cpu_to_be64(0);
+        _FDT((fdt_setprop(fdt, off, "linux,usable-memory", mem_reg,
+                          sizeof(mem_reg))));
+        _FDT((fdt_setprop_cell(fdt, off, "phandle",
+                               PHANDLE_GPURAM(sphb, i))));
+        g_free(mem_name);
+    }
+
+}
+
+void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
+                                        SpaprPhbState *sphb)
+{
+    int i, j;
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    for (i = 0; i < sphb->nvgpus->num; ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+
+        /* Skip "slot" without attached GPU */
+        if (!nvslot->gpdev) {
+            continue;
+        }
+        if (dev == nvslot->gpdev) {
+            uint32_t npus[nvslot->linknum];
+
+            for (j = 0; j < nvslot->linknum; ++j) {
+                PCIDevice *npdev = nvslot->links[j].npdev;
+
+                npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev));
+            }
+            _FDT(fdt_setprop(fdt, offset, "ibm,npu", npus,
+                             j * sizeof(npus[0])));
+            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
+                                   PHANDLE_PCIDEV(sphb, dev))));
+            continue;
+        }
+
+        for (j = 0; j < nvslot->linknum; ++j) {
+            if (dev != nvslot->links[j].npdev) {
+                continue;
+            }
+
+            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
+                                   PHANDLE_PCIDEV(sphb, dev))));
+            _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu",
+                                  PHANDLE_PCIDEV(sphb, nvslot->gpdev)));
+            _FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink",
+                                   PHANDLE_NVLINK(sphb, i, j))));
+            /*
+             * If we ever want to emulate GPU RAM at the same location as on
+             * the host - here is the encoding GPA->TGT:
+             *
+             * gta = ((sphb->nv2_gpa >> 42) & 0x1) << 42;
+             * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43;
+             * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45;
+             * gta |= sphb->nv2_gpa & ((1UL << 43) - 1);
+             */
+            _FDT(fdt_setprop_cell(fdt, offset, "memory-region",
+                                  PHANDLE_GPURAM(sphb, i)));
+            _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr",
+                                 nvslot->tgt));
+            _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed",
+                                  nvslot->links[j].link_speed));
+        }
+    }
+}
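The PHANDLE_* macros in the new file pack the vPHB index plus device identifiers into device-tree phandles that stay unique per machine. A quick standalone check of the arithmetic (same bit layout as the macros above, with the phb index passed directly instead of dereferenced):

#include <assert.h>
#include <stdint.h>

#define PHANDLE_NVLINK(index, gn, nn) (0x00130000 | ((index) << 8) | \
                                       ((gn) << 4) | (nn))
#define PHANDLE_GPURAM(index, n)      (0x110000FF | ((n) << 8) | \
                                       ((index) << 16))

int main(void)
{
    /* vPHB 1, GPU slot 0, link 2 */
    assert(PHANDLE_NVLINK(1, 0, 2) == 0x00130102);
    /* vPHB 2, GPU RAM node 3 */
    assert(PHANDLE_GPURAM(2, 3) == 0x112003FF);
    return 0;
}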
@@ -404,7 +404,7 @@ void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
 
     token -= RTAS_TOKEN_BASE;
 
-    assert(!rtas_table[token].name);
+    assert(!name || !rtas_table[token].name);
 
     rtas_table[token].name = name;
     rtas_table[token].fn = fn;
@@ -2180,3 +2180,134 @@ int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
 
     return 0;
 }
+
+static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v,
+                                     const char *name,
+                                     void *opaque, Error **errp)
+{
+    uint64_t tgt = (uintptr_t) opaque;
+    visit_type_uint64(v, name, &tgt, errp);
+}
+
+static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v,
+                                            const char *name,
+                                            void *opaque, Error **errp)
+{
+    uint32_t link_speed = (uint32_t)(uintptr_t) opaque;
+    visit_type_uint32(v, name, &link_speed, errp);
+}
+
+int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp)
+{
+    int ret;
+    void *p;
+    struct vfio_region_info *nv2reg = NULL;
+    struct vfio_info_cap_header *hdr;
+    struct vfio_region_info_cap_nvlink2_ssatgt *cap;
+    VFIOQuirk *quirk;
+
+    ret = vfio_get_dev_region_info(&vdev->vbasedev,
+                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
+                                   PCI_VENDOR_ID_NVIDIA,
+                                   VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
+                                   &nv2reg);
+    if (ret) {
+        return ret;
+    }
+
+    hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
+    if (!hdr) {
+        ret = -ENODEV;
+        goto free_exit;
+    }
+    cap = (void *) hdr;
+
+    p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
+             MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset);
+    if (p == MAP_FAILED) {
+        ret = -errno;
+        goto free_exit;
+    }
+
+    quirk = vfio_quirk_alloc(1);
+    memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr",
+                               nv2reg->size, p);
+    QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
+
+    object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
+                        vfio_pci_nvlink2_get_tgt, NULL, NULL,
+                        (void *) (uintptr_t) cap->tgt, NULL);
+    trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt,
+                                          nv2reg->size);
+free_exit:
+    g_free(nv2reg);
+
+    return ret;
+}
+
+int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp)
+{
+    int ret;
+    void *p;
+    struct vfio_region_info *atsdreg = NULL;
+    struct vfio_info_cap_header *hdr;
+    struct vfio_region_info_cap_nvlink2_ssatgt *captgt;
+    struct vfio_region_info_cap_nvlink2_lnkspd *capspeed;
+    VFIOQuirk *quirk;
+
+    ret = vfio_get_dev_region_info(&vdev->vbasedev,
+                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
+                                   PCI_VENDOR_ID_IBM,
+                                   VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
+                                   &atsdreg);
+    if (ret) {
+        return ret;
+    }
+
+    hdr = vfio_get_region_info_cap(atsdreg,
+                                   VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
+    if (!hdr) {
+        ret = -ENODEV;
+        goto free_exit;
+    }
+    captgt = (void *) hdr;
+
+    hdr = vfio_get_region_info_cap(atsdreg,
+                                   VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD);
+    if (!hdr) {
+        ret = -ENODEV;
+        goto free_exit;
+    }
+    capspeed = (void *) hdr;
+
+    /* Some NVLink bridges may not have assigned ATSD */
+    if (atsdreg->size) {
+        p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
+                 MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset);
+        if (p == MAP_FAILED) {
+            ret = -errno;
+            goto free_exit;
+        }
+
+        quirk = vfio_quirk_alloc(1);
+        memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev),
+                                          "nvlink2-atsd-mr", atsdreg->size, p);
+        QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
+    }
+
+    object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
+                        vfio_pci_nvlink2_get_tgt, NULL, NULL,
+                        (void *) (uintptr_t) captgt->tgt, NULL);
+    trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt,
+                                              atsdreg->size);
+
+    object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32",
+                        vfio_pci_nvlink2_get_link_speed, NULL, NULL,
+                        (void *) (uintptr_t) capspeed->link_speed, NULL);
+    trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name,
+                                              capspeed->link_speed);
+free_exit:
+    g_free(atsdreg);
+
+    return ret;
+}
@@ -3086,6 +3086,20 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
         }
     }
 
+    if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
+        ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
+        if (ret && ret != -ENODEV) {
+            error_report("Failed to setup NVIDIA V100 GPU RAM");
+        }
+    }
+
+    if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
+        ret = vfio_pci_nvlink2_init(vdev, errp);
+        if (ret && ret != -ENODEV) {
+            error_report("Failed to setup NVlink2 bridge");
+        }
+    }
+
     vfio_register_err_notifier(vdev);
     vfio_register_req_notifier(vdev);
     vfio_setup_resetfn_quirk(vdev);
@@ -196,6 +196,8 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
 int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
                                struct vfio_region_info *info,
                                Error **errp);
+int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp);
+int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp);
 
 void vfio_display_reset(VFIOPCIDevice *vdev);
 int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
@@ -86,6 +86,10 @@ vfio_pci_igd_opregion_enabled(const char *name) "%s"
 vfio_pci_igd_host_bridge_enabled(const char *name) "%s"
 vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s"
 
+vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
+vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
+vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x"
+
 # common.c
 vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)"
 vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
@@ -87,6 +87,9 @@ struct SpaprPhbState {
     uint32_t mig_liobn;
     hwaddr mig_mem_win_addr, mig_mem_win_size;
     hwaddr mig_io_win_addr, mig_io_win_size;
+    hwaddr nv2_gpa_win_addr;
+    hwaddr nv2_atsd_win_addr;
+    struct spapr_phb_pci_nvgpu_config *nvgpus;
 };
 
 #define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL
@@ -105,6 +108,22 @@ struct SpaprPhbState {
 
 #define SPAPR_PCI_MSI_WINDOW         0x40000000000ULL
 
+#define SPAPR_PCI_NV2RAM64_WIN_BASE  SPAPR_PCI_LIMIT
+#define SPAPR_PCI_NV2RAM64_WIN_SIZE  (2 * TiB) /* For up to 6 GPUs 256GB each */
+
+/* Max number of these GPUs per a physical box */
+#define NVGPU_MAX_NUM                6
+/* Max number of NVLinks per GPU in any physical box */
+#define NVGPU_MAX_LINKS              3
+
+/*
+ * GPU RAM starts at 64TiB so huge DMA window to cover it all ends at 128TiB
+ * which is enough. We do not need DMA for ATSD so we put them at 128TiB.
+ */
+#define SPAPR_PCI_NV2ATSD_WIN_BASE   (128 * TiB)
+#define SPAPR_PCI_NV2ATSD_WIN_SIZE   (NVGPU_MAX_NUM * NVGPU_MAX_LINKS * \
+                                      64 * KiB)
+
 static inline qemu_irq spapr_phb_lsi_qirq(struct SpaprPhbState *phb, int pin)
 {
     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
@@ -135,6 +154,13 @@ int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state);
 int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option);
 int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb);
 void spapr_phb_vfio_reset(DeviceState *qdev);
+void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp);
+void spapr_phb_nvgpu_free(SpaprPhbState *sphb);
+void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
+                                 Error **errp);
+void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt);
+void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
+                                        SpaprPhbState *sphb);
 #else
 static inline bool spapr_phb_eeh_available(SpaprPhbState *sphb)
 {
@@ -161,6 +187,25 @@ static inline int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb)
 static inline void spapr_phb_vfio_reset(DeviceState *qdev)
 {
 }
+static inline void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
+{
+}
+static inline void spapr_phb_nvgpu_free(SpaprPhbState *sphb)
+{
+}
+static inline void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt,
+                                               int bus_off, Error **errp)
+{
+}
+static inline void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb,
+                                                   void *fdt)
+{
+}
+static inline void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt,
+                                                      int offset,
+                                                      SpaprPhbState *sphb)
+{
+}
 #endif
 
 void spapr_phb_dma_reset(SpaprPhbState *sphb);
@@ -413,6 +413,10 @@ void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
 void pci_bus_irqs_cleanup(PCIBus *bus);
 int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
 /* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
+static inline int pci_swizzle(int slot, int pin)
+{
+    return (slot + pin) % PCI_NUM_PINS;
+}
 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
 PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
                               pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
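The new pci_swizzle() helper is the conventional PCI INTx swizzle that the "Drop duplicate PCI swizzle code" patch in this queue centralizes: a device in slot N presents its interrupt pin P on the parent bus as pin (N + P) mod 4. A quick standalone check of the mapping, reusing the function body verbatim:

    #include <stdio.h>

    #define PCI_NUM_PINS 4  /* INTA..INTD, as in QEMU's pci.h */

    static int pci_swizzle(int slot, int pin)
    {
        return (slot + pin) % PCI_NUM_PINS;
    }

    int main(void)
    {
        /* E.g. slot 5, INTB (pin 1) lands on the parent's INTC (pin 2). */
        for (int pin = 0; pin < PCI_NUM_PINS; pin++) {
            printf("slot 5 pin %d -> parent pin %d\n", pin, pci_swizzle(5, pin));
        }
        return 0;
    }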
@@ -123,7 +123,8 @@ struct SpaprMachineClass {
     void (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
                           uint64_t *buid, hwaddr *pio,
                           hwaddr *mmio32, hwaddr *mmio64,
-                          unsigned n_dma, uint32_t *liobns, Error **errp);
+                          unsigned n_dma, uint32_t *liobns, hwaddr *nv2gpa,
+                          hwaddr *nv2atsd, Error **errp);
     SpaprResizeHpt resize_hpt_default;
     SpaprCapabilities default_caps;
     SpaprIrq *irq;
@@ -199,6 +200,8 @@ struct SpaprMachineState {
 
     bool cmd_line_caps[SPAPR_CAP_NUM];
     SpaprCapabilities def, eff, mig;
+
+    unsigned gpu_numa_id;
 };
 
 #define H_SUCCESS 0
@@ -672,6 +675,10 @@ typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
                               uint32_t nargs, target_ulong args,
                               uint32_t nret, target_ulong rets);
 void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn);
+static inline void spapr_rtas_unregister(int token)
+{
+    spapr_rtas_register(token, NULL, NULL);
+}
 target_ulong spapr_rtas_call(PowerPCCPU *cpu, SpaprMachineState *sm,
                              uint32_t token, uint32_t nargs, target_ulong args,
                              uint32_t nret, target_ulong rets);
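The new spapr_rtas_unregister() works because spapr_rtas_register() with a NULL name and function clears the token's slot. A standalone mimic of that table pattern (simplified; the real table lives in hw/ppc/spapr_rtas.c, and the token values here are made up):

    #include <stdio.h>
    #include <stddef.h>

    #define TOKEN_BASE 0x2000
    #define TOKEN_MAX  8

    typedef void (*rtas_fn)(void);

    static struct { const char *name; rtas_fn fn; } rtas_table[TOKEN_MAX];

    static void rtas_register(int token, const char *name, rtas_fn fn)
    {
        rtas_table[token - TOKEN_BASE].name = name;
        rtas_table[token - TOKEN_BASE].fn = fn;
    }

    /* Same trick as the new helper: registering NULL unregisters. */
    static inline void rtas_unregister(int token)
    {
        rtas_register(token, NULL, NULL);
    }

    static void dummy_call(void) { puts("dummy rtas call"); }

    int main(void)
    {
        rtas_register(TOKEN_BASE + 1, "dummy", dummy_call);
        rtas_unregister(TOKEN_BASE + 1);
        printf("slot cleared: %s\n", rtas_table[1].fn == NULL ? "yes" : "no");
        return 0;
    }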
@@ -777,6 +784,8 @@ void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
                           Error **errp);
 void spapr_clear_pending_events(SpaprMachineState *spapr);
 int spapr_max_server_number(SpaprMachineState *spapr);
+void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
+                      uint64_t pte0, uint64_t pte1);
 
 /* DRC callbacks. */
 void spapr_core_release(DeviceState *dev);
@@ -393,7 +393,8 @@ enum {
     CPU_POWERPC_RS64IV             = 0x00370000,
 #endif /* defined(TARGET_PPC64) */
     /* Original POWER */
-    /* XXX: should be POWER (RIOS), RSC3308, RSC4608,
+    /*
+     * XXX: should be POWER (RIOS), RSC3308, RSC4608,
      * POWER2 (RIOS2) & RSC2 (P2SC) here
      */
     /* PA Semi core */
@@ -23,7 +23,7 @@
 #include "qemu-common.h"
 #include "qemu/int128.h"
 
-//#define PPC_EMULATE_32BITS_HYPV
+/* #define PPC_EMULATE_32BITS_HYPV */
 
 #if defined(TARGET_PPC64)
 /* PowerPC 64 definitions */
@@ -32,14 +32,19 @@
 
 #define TCG_GUEST_DEFAULT_MO 0
 
-/* Note that the official physical address space bits is 62-M where M
-   is implementation dependent. I've not looked up M for the set of
-   cpus we emulate at the system level. */
+/*
+ * Note that the official physical address space bits is 62-M where M
+ * is implementation dependent. I've not looked up M for the set of
+ * cpus we emulate at the system level.
+ */
 #define TARGET_PHYS_ADDR_SPACE_BITS 62
 
-/* Note that the PPC environment architecture talks about 80 bit virtual
-   addresses, with segmentation. Obviously that's not all visible to a
-   single process, which is all we're concerned with here. */
+/*
+ * Note that the PPC environment architecture talks about 80 bit
+ * virtual addresses, with segmentation. Obviously that's not all
+ * visible to a single process, which is all we're concerned with
+ * here.
+ */
 #ifdef TARGET_ABI32
 # define TARGET_VIRT_ADDR_SPACE_BITS 32
 #else
@@ -237,9 +242,11 @@ struct ppc_spr_t {
     const char *name;
     target_ulong default_value;
 #ifdef CONFIG_KVM
-    /* We (ab)use the fact that all the SPRs will have ids for the
+    /*
+     * We (ab)use the fact that all the SPRs will have ids for the
      * ONE_REG interface will have KVM_REG_PPC to use 0 as meaning,
-     * don't sync this */
+     * don't sync this
+     */
     uint64_t one_reg_id;
 #endif
 };
@@ -962,9 +969,10 @@ struct ppc_radix_page_info {
 /*****************************************************************************/
 /* The whole PowerPC CPU context */
 
-/* PowerPC needs eight modes for different hypervisor/supervisor/guest +
- * real/paged mode combinations. The other two modes are for external PID
- * load/store.
+/*
+ * PowerPC needs eight modes for different hypervisor/supervisor/guest
+ * + real/paged mode combinations. The other two modes are for
+ * external PID load/store.
  */
 #define NB_MMU_MODES 10
 #define MMU_MODE8_SUFFIX _epl
@@ -976,8 +984,9 @@ struct ppc_radix_page_info {
 #define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
 
 struct CPUPPCState {
-    /* First are the most commonly used resources
-     * during translated code execution
+    /*
+     * First are the most commonly used resources during translated
+     * code execution
      */
     /* general purpose registers */
     target_ulong gpr[32];
@@ -1023,8 +1032,8 @@ struct CPUPPCState {
     /* High part of 128-bit helper return. */
     uint64_t retxh;
 
-    int access_type; /* when a memory exception occurs, the access
-                        type is stored here */
+    /* when a memory exception occurs, the access type is stored here */
+    int access_type;
 
     CPU_COMMON
 
@@ -1072,8 +1081,10 @@ struct CPUPPCState {
     /* SPE registers */
     uint64_t spe_acc;
     uint32_t spe_fscr;
-    /* SPE and Altivec can share a status since they will never be used
-     * simultaneously */
+    /*
+     * SPE and Altivec can share a status since they will never be
+     * used simultaneously
+     */
     float_status vec_status;
 
     /* Internal devices resources */
@@ -1103,7 +1114,8 @@ struct CPUPPCState {
     int error_code;
     uint32_t pending_interrupts;
 #if !defined(CONFIG_USER_ONLY)
-    /* This is the IRQ controller, which is implementation dependent
+    /*
+     * This is the IRQ controller, which is implementation dependent
      * and only relevant when emulating a complete machine.
      */
     uint32_t irq_input_state;
|
||||||
hwaddr mpic_iack;
|
hwaddr mpic_iack;
|
||||||
/* true when the external proxy facility mode is enabled */
|
/* true when the external proxy facility mode is enabled */
|
||||||
bool mpic_proxy;
|
bool mpic_proxy;
|
||||||
/* set when the processor has an HV mode, thus HV priv
|
/*
|
||||||
|
* set when the processor has an HV mode, thus HV priv
|
||||||
* instructions and SPRs are diallowed if MSR:HV is 0
|
* instructions and SPRs are diallowed if MSR:HV is 0
|
||||||
*/
|
*/
|
||||||
bool has_hv_mode;
|
bool has_hv_mode;
|
||||||
|
@ -1149,8 +1162,10 @@ struct CPUPPCState {
|
||||||
|
|
||||||
/* booke timers */
|
/* booke timers */
|
||||||
|
|
||||||
/* Specifies bit locations of the Time Base used to signal a fixed timer
|
/*
|
||||||
* exception on a transition from 0 to 1. (watchdog or fixed-interval timer)
|
* Specifies bit locations of the Time Base used to signal a fixed
|
||||||
|
* timer exception on a transition from 0 to 1. (watchdog or
|
||||||
|
* fixed-interval timer)
|
||||||
*
|
*
|
||||||
* 0 selects the least significant bit.
|
* 0 selects the least significant bit.
|
||||||
* 63 selects the most significant bit.
|
* 63 selects the most significant bit.
|
||||||
|
@@ -1250,8 +1265,8 @@ struct PPCVirtualHypervisorClass {
     void (*unmap_hptes)(PPCVirtualHypervisor *vhyp,
                         const ppc_hash_pte64_t *hptes,
                         hwaddr ptex, int n);
-    void (*store_hpte)(PPCVirtualHypervisor *vhyp, hwaddr ptex,
-                       uint64_t pte0, uint64_t pte1);
+    void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
+    void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
     void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
     target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
 };
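This is the interface change behind the "Rework R and C bit updates" patches in this queue: instead of one store_hpte callback that rewrites a whole hashed page table entry, the virtual-hypervisor class now exposes narrow hooks that set only the Reference or Change bit. A standalone sketch of the split; the HPTE64_R_R/HPTE64_R_C values follow the bit positions used by QEMU's mmu-hash64.h, everything else is deliberately simplified:

    #include <stdio.h>
    #include <stdint.h>

    #define HPTE64_R_R 0x0000000000000100ULL  /* Reference bit */
    #define HPTE64_R_C 0x0000000000000080ULL  /* Change bit */

    typedef struct {
        void (*hpte_set_c)(uint64_t *htab, unsigned long ptex, uint64_t pte1);
        void (*hpte_set_r)(uint64_t *htab, unsigned long ptex, uint64_t pte1);
    } VhypClass;

    static void set_c(uint64_t *htab, unsigned long ptex, uint64_t pte1)
    {
        htab[ptex] = pte1 | HPTE64_R_C;   /* only the C bit is added */
    }

    static void set_r(uint64_t *htab, unsigned long ptex, uint64_t pte1)
    {
        htab[ptex] = pte1 | HPTE64_R_R;   /* only the R bit is added */
    }

    int main(void)
    {
        uint64_t htab[4] = {0};
        VhypClass vhc = { .hpte_set_c = set_c, .hpte_set_r = set_r };

        vhc.hpte_set_r(htab, 2, htab[2]);  /* page referenced */
        vhc.hpte_set_c(htab, 2, htab[2]);  /* page dirtied */
        printf("pte1 = 0x%016llx\n", (unsigned long long)htab[2]);
        return 0;
    }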
@@ -1290,11 +1305,12 @@ extern const struct VMStateDescription vmstate_ppc_cpu;
 
 /*****************************************************************************/
 void ppc_translate_init(void);
-/* you can call this signal handler from your SIGBUS and SIGSEGV
-   signal handlers to inform the virtual CPU of exceptions. non zero
-   is returned if the signal was handled by the virtual CPU. */
-int cpu_ppc_signal_handler (int host_signum, void *pinfo,
-                            void *puc);
+/*
+ * you can call this signal handler from your SIGBUS and SIGSEGV
+ * signal handlers to inform the virtual CPU of exceptions. non zero
+ * is returned if the signal was handled by the virtual CPU.
+ */
+int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc);
 #if defined(CONFIG_USER_ONLY)
 int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
                              int mmu_idx);
@@ -1349,7 +1365,8 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
 
     gprv = env->gpr[gprn];
     if (env->flags & POWERPC_FLAG_SPE) {
-        /* If the CPU implements the SPE extension, we have to get the
+        /*
+         * If the CPU implements the SPE extension, we have to get the
          * high bits of the GPR from the gprh storage area
          */
         gprv &= 0xFFFFFFFFULL;
@@ -2226,7 +2243,8 @@ enum {
 };
 
 /*****************************************************************************/
-/* Memory access type :
+/*
+ * Memory access type :
  * may be needed for precise access rights control and precise exceptions.
  */
 enum {
@@ -2242,7 +2260,8 @@ enum {
     ACCESS_CACHE = 0x60, /* Cache manipulation */
 };
 
-/* Hardware interruption sources:
+/*
+ * Hardware interrupt sources:
  * all those exceptions can be raised simultaneously
  */
 /* Input pins definitions */
@@ -2325,9 +2344,11 @@ enum {
 enum {
     /* POWER7 input pins */
     POWER7_INPUT_INT = 0,
-    /* POWER7 probably has other inputs, but we don't care about them
+    /*
+     * POWER7 probably has other inputs, but we don't care about them
      * for any existing machine. We can wire these up when we need
-     * them */
+     * them
+     */
     POWER7_INPUT_NB,
 };
@@ -25,9 +25,9 @@
 #include "internal.h"
 #include "helper_regs.h"
 
-//#define DEBUG_OP
-//#define DEBUG_SOFTWARE_TLB
-//#define DEBUG_EXCEPTIONS
+/* #define DEBUG_OP */
+/* #define DEBUG_SOFTWARE_TLB */
+/* #define DEBUG_EXCEPTIONS */
 
 #ifdef DEBUG_EXCEPTIONS
 #  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
@@ -126,8 +126,9 @@ static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
     return offset;
 }
 
-/* Note that this function should be greatly optimized
- * when called with a constant excp, from ppc_hw_interrupt
+/*
+ * Note that this function should be greatly optimized when called
+ * with a constant excp, from ppc_hw_interrupt
  */
 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
 {
@@ -147,7 +148,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
         msr = env->msr & ~0x783f0000ULL;
     }
 
-    /* new interrupt handler msr preserves existing HV and ME unless
+    /*
+     * new interrupt handler msr preserves existing HV and ME unless
      * explicitly overridden
      */
     new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
@@ -166,7 +168,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
         excp = powerpc_reset_wakeup(cs, env, excp, &msr);
     }
 
-    /* Exception targeting modifiers
+    /*
+     * Exception targeting modifiers
      *
      * LPES0 is supported on POWER7/8/9
      * LPES1 is not supported (old iSeries mode)
|
||||||
ail = 0;
|
ail = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Hypervisor emulation assistance interrupt only exists on server
|
/*
|
||||||
|
* Hypervisor emulation assistance interrupt only exists on server
|
||||||
* arch 2.05 server or later. We also don't want to generate it if
|
* arch 2.05 server or later. We also don't want to generate it if
|
||||||
* we don't have HVB in msr_mask (PAPR mode).
|
* we don't have HVB in msr_mask (PAPR mode).
|
||||||
*/
|
*/
|
||||||
|
@@ -229,8 +233,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
         break;
     case POWERPC_EXCP_MCHECK:    /* Machine check exception */
         if (msr_me == 0) {
-            /* Machine check exception is not enabled.
-             * Enter checkstop state.
+            /*
+             * Machine check exception is not enabled. Enter
+             * checkstop state.
              */
             fprintf(stderr, "Machine check while not allowed. "
                     "Entering checkstop state\n");
@@ -242,8 +247,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
             cpu_interrupt_exittb(cs);
         }
         if (env->msr_mask & MSR_HVB) {
-            /* ISA specifies HV, but can be delivered to guest with HV clear
-             * (e.g., see FWNMI in PAPR).
+            /*
+             * ISA specifies HV, but can be delivered to guest with HV
+             * clear (e.g., see FWNMI in PAPR).
              */
             new_msr |= (target_ulong)MSR_HVB;
         }
@@ -294,9 +300,10 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
         break;
     case POWERPC_EXCP_ALIGN:     /* Alignment exception */
         /* Get rS/rD and rA from faulting opcode */
-        /* Note: the opcode fields will not be set properly for a direct
-         * store load/store, but nobody cares as nobody actually uses
-         * direct store segments.
+        /*
+         * Note: the opcode fields will not be set properly for a
+         * direct store load/store, but nobody cares as nobody
+         * actually uses direct store segments.
          */
         env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
         break;
@@ -310,7 +317,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
             return;
         }
 
-        /* FP exceptions always have NIP pointing to the faulting
+        /*
+         * FP exceptions always have NIP pointing to the faulting
          * instruction, so always use store_next and claim we are
          * precise in the MSR.
          */
@@ -341,7 +349,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
         dump_syscall(env);
         lev = env->error_code;
 
-        /* We need to correct the NIP which in this case is supposed
+        /*
+         * We need to correct the NIP which in this case is supposed
          * to point to the next instruction
          */
         env->nip += 4;
@@ -425,8 +434,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
             new_msr |= ((target_ulong)1 << MSR_ME);
         }
         if (env->msr_mask & MSR_HVB) {
-            /* ISA specifies HV, but can be delivered to guest with HV clear
-             * (e.g., see FWNMI in PAPR, NMI injection in QEMU).
+            /*
+             * ISA specifies HV, but can be delivered to guest with HV
+             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
              */
             new_msr |= (target_ulong)MSR_HVB;
         } else {
@@ -675,7 +685,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
         env->spr[asrr1] = env->spr[srr1];
     }
 
-    /* Sort out endianness of interrupt, this differs depending on the
+    /*
+     * Sort out endianness of interrupt, this differs depending on the
      * CPU, the HV mode, etc...
      */
 #ifdef TARGET_PPC64
@@ -716,8 +727,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
     }
     vector |= env->excp_prefix;
 
-    /* AIL only works if there is no HV transition and we are running with
-     * translations enabled
+    /*
+     * AIL only works if there is no HV transition and we are running
+     * with translations enabled
      */
     if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
         ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
/* We don't use hreg_store_msr here as already have treated
|
/*
|
||||||
* any special case that could occur. Just store MSR and update hflags
|
* We don't use hreg_store_msr here as already have treated any
|
||||||
|
* special case that could occur. Just store MSR and update hflags
|
||||||
*
|
*
|
||||||
* Note: We *MUST* not use hreg_store_msr() as-is anyway because it
|
* Note: We *MUST* not use hreg_store_msr() as-is anyway because it
|
||||||
* will prevent setting of the HV bit which some exceptions might need
|
* will prevent setting of the HV bit which some exceptions might need
|
||||||
|
@ -762,8 +775,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
|
||||||
/* Reset the reservation */
|
/* Reset the reservation */
|
||||||
env->reserve_addr = -1;
|
env->reserve_addr = -1;
|
||||||
|
|
||||||
/* Any interrupt is context synchronizing, check if TCG TLB
|
/*
|
||||||
* needs a delayed flush on ppc64
|
* Any interrupt is context synchronizing, check if TCG TLB needs
|
||||||
|
* a delayed flush on ppc64
|
||||||
*/
|
*/
|
||||||
check_tlb_flush(env, false);
|
check_tlb_flush(env, false);
|
||||||
}
|
}
|
||||||
|
@@ -1015,8 +1029,9 @@ void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
     cs = CPU(ppc_env_get_cpu(env));
     cs->halted = 1;
 
-    /* The architecture specifies that HDEC interrupts are
-     * discarded in PM states
+    /*
+     * The architecture specifies that HDEC interrupts are discarded
+     * in PM states
      */
     env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
 
@@ -1047,8 +1062,9 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
 #if defined(DEBUG_OP)
     cpu_dump_rfi(env->nip, env->msr);
 #endif
-    /* No need to raise an exception here,
-     * as rfi is always the last insn of a TB
+    /*
+     * No need to raise an exception here, as rfi is always the last
+     * insn of a TB
      */
     cpu_interrupt_exittb(cs);
     /* Reset the reservation */
@@ -1067,8 +1083,9 @@ void helper_rfi(CPUPPCState *env)
 #if defined(TARGET_PPC64)
 void helper_rfid(CPUPPCState *env)
 {
-    /* The architecture defines a number of rules for which bits
-     * can change but in practice, we handle this in hreg_store_msr()
+    /*
+     * The architecture defines a number of rules for which bits can
+     * change but in practice, we handle this in hreg_store_msr()
      * which will be called by do_rfi(), so there is no need to filter
      * here
      */
@@ -1206,9 +1223,11 @@ static int book3s_dbell2irq(target_ulong rb)
 {
     int msg = rb & DBELL_TYPE_MASK;
 
-    /* A Directed Hypervisor Doorbell message is sent only if the
+    /*
+     * A Directed Hypervisor Doorbell message is sent only if the
      * message type is 5. All other types are reserved and the
-     * instruction is a no-op */
+     * instruction is a no-op
+     */
     return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1;
 }
@@ -90,10 +90,12 @@ uint32_t helper_tosingle(uint64_t arg)
         ret = extract64(arg, 62, 2) << 30;
         ret |= extract64(arg, 29, 30);
     } else {
-        /* Zero or Denormal result. If the exponent is in bounds for
-         * a single-precision denormal result, extract the proper bits.
-         * If the input is not zero, and the exponent is out of bounds,
-         * then the result is undefined; this underflows to zero.
+        /*
+         * Zero or Denormal result. If the exponent is in bounds for
+         * a single-precision denormal result, extract the proper
+         * bits. If the input is not zero, and the exponent is out of
+         * bounds, then the result is undefined; this underflows to
+         * zero.
          */
         ret = extract64(arg, 63, 1) << 31;
         if (unlikely(exp >= 874)) {
|
||||||
#define float64_to_float64(x, env) x
|
#define float64_to_float64(x, env) x
|
||||||
|
|
||||||
|
|
||||||
/* VSX_ADD_SUB - VSX floating point add/subract
|
/*
|
||||||
|
* VSX_ADD_SUB - VSX floating point add/subract
|
||||||
* name - instruction mnemonic
|
* name - instruction mnemonic
|
||||||
* op - operation (add or sub)
|
* op - operation (add or sub)
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
|
@@ -1872,7 +1875,8 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
     do_float_check_status(env, GETPC());
 }
 
-/* VSX_MUL - VSX floating point multiply
+/*
+ * VSX_MUL - VSX floating point multiply
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
@@ -1950,7 +1954,8 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
     do_float_check_status(env, GETPC());
 }
 
-/* VSX_DIV - VSX floating point divide
+/*
+ * VSX_DIV - VSX floating point divide
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
@@ -2034,7 +2039,8 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
     do_float_check_status(env, GETPC());
 }
 
-/* VSX_RE - VSX floating point reciprocal estimate
+/*
+ * VSX_RE - VSX floating point reciprocal estimate
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
@@ -2075,7 +2081,8 @@ VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
 VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
 VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
 
-/* VSX_SQRT - VSX floating point square root
+/*
+ * VSX_SQRT - VSX floating point square root
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
@@ -2124,7 +2131,8 @@ VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
 VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
 VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
 
-/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
+/*
+ * VSX_RSQRTE - VSX floating point reciprocal square root estimate
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
@@ -2174,7 +2182,8 @@ VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
 VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
 VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
 
-/* VSX_TDIV - VSX floating point test for divide
+/*
+ * VSX_TDIV - VSX floating point test for divide
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
|
||||||
} \
|
} \
|
||||||
\
|
\
|
||||||
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
|
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
|
||||||
/* XB is not zero because of the above check and */ \
|
/* \
|
||||||
/* so must be denormalized. */ \
|
* XB is not zero because of the above check and so \
|
||||||
|
* must be denormalized. \
|
||||||
|
*/ \
|
||||||
fg_flag = 1; \
|
fg_flag = 1; \
|
||||||
} \
|
} \
|
||||||
} \
|
} \
|
||||||
|
@ -2231,7 +2242,8 @@ VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
|
||||||
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
|
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
|
||||||
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
|
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
|
||||||
|
|
||||||
/* VSX_TSQRT - VSX floating point test for square root
|
/*
|
||||||
|
* VSX_TSQRT - VSX floating point test for square root
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
* tp - type (float32 or float64)
|
* tp - type (float32 or float64)
|
||||||
|
@ -2271,8 +2283,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
|
||||||
} \
|
} \
|
||||||
\
|
\
|
||||||
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
|
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
|
||||||
/* XB is not zero because of the above check and */ \
|
/* \
|
||||||
/* therefore must be denormalized. */ \
|
* XB is not zero because of the above check and \
|
||||||
|
* therefore must be denormalized. \
|
||||||
|
*/ \
|
||||||
fg_flag = 1; \
|
fg_flag = 1; \
|
||||||
} \
|
} \
|
||||||
} \
|
} \
|
||||||
|
@@ -2285,7 +2299,8 @@ VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
 VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
 VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
 
-/* VSX_MADD - VSX floating point multiply/add variations
+/*
+ * VSX_MADD - VSX floating point multiply/add variations
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
@@ -2322,8 +2337,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
         float_status tstat = env->fp_status;                          \
         set_float_exception_flags(0, &tstat);                         \
         if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
-            /* Avoid double rounding errors by rounding the intermediate */ \
-            /* result to odd.                                            */ \
+            /*                                                        \
+             * Avoid double rounding errors by rounding the intermediate \
+             * result to odd.                                         \
+             */                                                       \
             set_float_rounding_mode(float_round_to_zero, &tstat);     \
             xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,          \
                                      maddflgs, &tstat);               \
|
||||||
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
|
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
|
||||||
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
|
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
|
||||||
|
|
||||||
/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
|
/*
|
||||||
|
* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* cmp - comparison operation
|
* cmp - comparison operation
|
||||||
* exp - expected result of comparison
|
* exp - expected result of comparison
|
||||||
|
@ -2604,7 +2622,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
|
||||||
VSX_SCALAR_CMPQ(xscmpoqp, 1)
|
VSX_SCALAR_CMPQ(xscmpoqp, 1)
|
||||||
VSX_SCALAR_CMPQ(xscmpuqp, 0)
|
VSX_SCALAR_CMPQ(xscmpuqp, 0)
|
||||||
|
|
||||||
/* VSX_MAX_MIN - VSX floating point maximum/minimum
|
/*
|
||||||
|
* VSX_MAX_MIN - VSX floating point maximum/minimum
|
||||||
* name - instruction mnemonic
|
* name - instruction mnemonic
|
||||||
* op - operation (max or min)
|
* op - operation (max or min)
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
|
@ -2733,7 +2752,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
|
||||||
VSX_MAX_MINJ(xsmaxjdp, 1);
|
VSX_MAX_MINJ(xsmaxjdp, 1);
|
||||||
VSX_MAX_MINJ(xsminjdp, 0);
|
VSX_MAX_MINJ(xsminjdp, 0);
|
||||||
|
|
||||||
/* VSX_CMP - VSX floating point compare
|
/*
|
||||||
|
* VSX_CMP - VSX floating point compare
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
* tp - type (float32 or float64)
|
* tp - type (float32 or float64)
|
||||||
|
@ -2793,7 +2813,8 @@ VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
|
||||||
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
|
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
|
||||||
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
|
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
|
||||||
|
|
||||||
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
|
/*
|
||||||
|
* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
* stp - source type (float32 or float64)
|
* stp - source type (float32 or float64)
|
||||||
|
@ -2832,7 +2853,8 @@ VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
|
||||||
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
|
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
|
||||||
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
|
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
|
||||||
|
|
||||||
/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
|
/*
|
||||||
|
* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
* stp - source type (float32 or float64)
|
* stp - source type (float32 or float64)
|
||||||
|
@ -2868,7 +2890,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
|
||||||
|
|
||||||
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
|
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
|
||||||
|
|
||||||
/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
|
/*
|
||||||
|
* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
|
||||||
* involving one half precision value
|
* involving one half precision value
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
|
@ -2953,7 +2976,8 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
|
||||||
return float32_to_float64(xb >> 32, &tstat);
|
return float32_to_float64(xb >> 32, &tstat);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
|
/*
|
||||||
|
* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
* stp - source type (float32 or float64)
|
* stp - source type (float32 or float64)
|
||||||
|
@ -3006,7 +3030,8 @@ VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
|
||||||
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
|
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
|
||||||
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
|
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
|
||||||
|
|
||||||
/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
|
/*
|
||||||
|
* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* stp - source type (float32 or float64)
|
* stp - source type (float32 or float64)
|
||||||
* ttp - target type (int32, uint32, int64 or uint64)
|
* ttp - target type (int32, uint32, int64 or uint64)
|
||||||
|
@ -3040,7 +3065,8 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
|
||||||
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
|
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
|
||||||
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
|
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
|
||||||
|
|
||||||
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
|
/*
|
||||||
|
* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* nels - number of elements (1, 2 or 4)
|
* nels - number of elements (1, 2 or 4)
|
||||||
* stp - source type (int32, uint32, int64 or uint64)
|
* stp - source type (int32, uint32, int64 or uint64)
|
||||||
|
@ -3086,7 +3112,8 @@ VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
|
||||||
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
|
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
|
||||||
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
|
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
|
||||||
|
|
||||||
/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
|
/*
|
||||||
|
* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
|
||||||
* op - instruction mnemonic
|
* op - instruction mnemonic
|
||||||
* stp - source type (int32, uint32, int64 or uint64)
|
* stp - source type (int32, uint32, int64 or uint64)
|
||||||
* ttp - target type (float32 or float64)
|
* ttp - target type (float32 or float64)
|
||||||
|
@@ -3111,13 +3138,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
 VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
 
-/* For "use current rounding mode", define a value that will not be one of
- * the existing rounding model enums.
+/*
+ * For "use current rounding mode", define a value that will not be
+ * one of the existing rounding model enums.
  */
 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                              float_round_up + float_round_to_zero)
 
-/* VSX_ROUND - VSX floating point round
+/*
+ * VSX_ROUND - VSX floating point round
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * tp    - type (float32 or float64)
@@ -3150,9 +3179,11 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
         }                                                              \
     }                                                                  \
                                                                        \
-    /* If this is not a "use current rounding mode" instruction,       \
+    /*                                                                 \
+     * If this is not a "use current rounding mode" instruction,       \
      * then inhibit setting of the XX bit and restore rounding         \
-     * mode from FPSCR */                                              \
+     * mode from FPSCR                                                 \
+     */                                                                \
     if (rmode != FLOAT_ROUND_CURRENT) {                                \
         fpscr_set_rounding_mode(env);                                  \
         env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
@@ -3234,7 +3265,8 @@ void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
     putVSR(xT(opcode), &xt, env);
 }
 
-/* VSX_TEST_DC - VSX floating point test data class
+/*
+ * VSX_TEST_DC - VSX floating point test data class
  * op    - instruction mnemonic
  * nels  - number of elements (1, 2 or 4)
  * xbn   - VSR register number
@@ -84,11 +84,14 @@ static int ppc_gdb_register_len(int n)
     }
 }
 
-/* We need to present the registers to gdb in the "current" memory ordering.
-   For user-only mode we get this for free; TARGET_WORDS_BIGENDIAN is set to
-   the proper ordering for the binary, and cannot be changed.
-   For system mode, TARGET_WORDS_BIGENDIAN is always set, and we must check
-   the current mode of the chip to see if we're running in little-endian. */
+/*
+ * We need to present the registers to gdb in the "current" memory
+ * ordering. For user-only mode we get this for free;
+ * TARGET_WORDS_BIGENDIAN is set to the proper ordering for the
+ * binary, and cannot be changed. For system mode,
+ * TARGET_WORDS_BIGENDIAN is always set, and we must check the current
+ * mode of the chip to see if we're running in little-endian.
+ */
 void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
 {
 #ifndef CONFIG_USER_ONLY
@@ -104,11 +107,12 @@ void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
 #endif
 }
 
-/* Old gdb always expects FP registers.  Newer (xml-aware) gdb only
+/*
+ * Old gdb always expects FP registers.  Newer (xml-aware) gdb only
  * expects whatever the target description contains.  Due to a
  * historical mishap the FP registers appear in between core integer
- * regs and PC, MSR, CR, and so forth.  We hack round this by giving the
- * FP regs zero size when talking to a newer gdb.
+ * regs and PC, MSR, CR, and so forth.  We hack round this by giving
+ * the FP regs zero size when talking to a newer gdb.
  */
 
 int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
@@ -44,10 +44,11 @@ static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
 
 static inline void hreg_compute_mem_idx(CPUPPCState *env)
 {
-    /* This is our encoding for server processors. The architecture
+    /*
+     * This is our encoding for server processors. The architecture
      * specifies that there is no such thing as userspace with
-     * translation off, however it appears that MacOS does it and
-     * some 32-bit CPUs support it. Weird...
+     * translation off, however it appears that MacOS does it and some
+     * 32-bit CPUs support it. Weird...
      *
      * 0 = Guest User space virtual mode
     * 1 = Guest Kernel space virtual mode
@@ -143,7 +144,8 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
         /* Change the exception prefix on PowerPC 601 */
         env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
     }
-    /* If PR=1 then EE, IR and DR must be 1
+    /*
+     * If PR=1 then EE, IR and DR must be 1
      *
      * Note: We only enforce this on 64-bit server processors.
      * It appears that:
@@ -137,7 +137,8 @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
 /* if x = 0xab, returns 0xabababababababab */
 #define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
 
-/* substract 1 from each byte, and with inverse, check if MSB is set at each
+/*
+ * subtract 1 from each byte, and with inverse, check if MSB is set at each
  * byte.
  * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
  * (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
|
||||||
#undef haszero
|
#undef haszero
|
||||||
#undef hasvalue
|
#undef hasvalue
|
||||||
|
|
||||||
/* Return invalid random number.
|
/*
|
||||||
|
* Return invalid random number.
|
||||||
*
|
*
|
||||||
* FIXME: Add rng backend or other mechanism to get cryptographically suitable
|
* FIXME: Add rng backend or other mechanism to get cryptographically suitable
|
||||||
* random number
|
* random number
|
||||||
|
@ -370,7 +372,8 @@ target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
|
||||||
/* 602 specific instructions */
|
/* 602 specific instructions */
|
||||||
/* mfrom is the most crazy instruction ever seen, imho ! */
|
/* mfrom is the most crazy instruction ever seen, imho ! */
|
||||||
/* Real implementation uses a ROM table. Do the same */
|
/* Real implementation uses a ROM table. Do the same */
|
||||||
/* Extremely decomposed:
|
/*
|
||||||
|
* Extremely decomposed:
|
||||||
* -arg / 256
|
* -arg / 256
|
||||||
* return 256 * log10(10 + 1.0) + 0.5
|
* return 256 * log10(10 + 1.0) + 0.5
|
||||||
*/
|
*/
|
||||||
|
@ -634,7 +637,8 @@ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
|
||||||
} \
|
} \
|
||||||
}
|
}
|
||||||
|
|
||||||
/* VABSDU - Vector absolute difference unsigned
|
/*
|
||||||
|
* VABSDU - Vector absolute difference unsigned
|
||||||
* name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
|
* name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
|
||||||
* element - element type to access from vector
|
* element - element type to access from vector
|
||||||
*/
|
*/
|
||||||
|
@ -739,7 +743,8 @@ void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
|
||||||
} \
|
} \
|
||||||
}
|
}
|
||||||
|
|
||||||
/* VCMPNEZ - Vector compare not equal to zero
|
/*
|
||||||
|
* VCMPNEZ - Vector compare not equal to zero
|
||||||
* suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
|
* suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
|
||||||
* element - element type to access from vector
|
* element - element type to access from vector
|
||||||
*/
|
*/
|
||||||
|
@ -1736,9 +1741,11 @@ VEXTU_X_DO(vextuhrx, 16, 0)
|
||||||
VEXTU_X_DO(vextuwrx, 32, 0)
|
VEXTU_X_DO(vextuwrx, 32, 0)
|
||||||
#undef VEXTU_X_DO
|
#undef VEXTU_X_DO
|
||||||
|
|
||||||
/* The specification says that the results are undefined if all of the
|
/*
|
||||||
* shift counts are not identical. We check to make sure that they are
|
* The specification says that the results are undefined if all of the
|
||||||
* to conform to what real hardware appears to do. */
|
* shift counts are not identical. We check to make sure that they
|
||||||
|
* are to conform to what real hardware appears to do.
|
||||||
|
*/
|
||||||
#define VSHIFT(suffix, leftp) \
|
#define VSHIFT(suffix, leftp) \
|
||||||
void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
|
void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
|
||||||
{ \
|
{ \
|
||||||
|
@ -1805,9 +1812,10 @@ void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
|
||||||
int i;
|
int i;
|
||||||
unsigned int shift, bytes;
|
unsigned int shift, bytes;
|
||||||
|
|
||||||
/* Use reverse order, as destination and source register can be same. Its
|
/*
|
||||||
* being modified in place saving temporary, reverse order will guarantee
|
* Use reverse order, as destination and source register can be
|
||||||
* that computed result is not fed back.
|
* same. Its being modified in place saving temporary, reverse
|
||||||
|
* order will guarantee that computed result is not fed back.
|
||||||
*/
|
*/
|
||||||
for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
|
for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
|
||||||
shift = b->u8[i] & 0x7; /* extract shift value */
|
shift = b->u8[i] & 0x7; /* extract shift value */
|
||||||
|
|
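A standalone model of the aliasing hazard that vsrv comment is about: each result byte depends on source bytes i and i - 1, so a descending loop never reads a byte it has already overwritten, even when destination and source are the same register. The fixed shift and flat array below are simplifications of the real per-byte-variable vsrv semantics:

    #include <stdio.h>
    #include <stdint.h>

    #define N 8

    static void shift_right_variable(uint8_t *r, const uint8_t *a, unsigned shift)
    {
        /* r and a may alias; reverse order keeps a[i - 1] intact until used. */
        for (int i = N - 1; i >= 0; i--) {
            unsigned bytes = (i == 0) ? a[i] : (unsigned)(a[i] | (a[i - 1] << 8));
            r[i] = (bytes >> shift) & 0xff;
        }
    }

    int main(void)
    {
        uint8_t v[N] = {0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0};

        shift_right_variable(v, v, 4);   /* in place: destination == source */
        for (int i = 0; i < N; i++) {
            printf("%02x ", v[i]);       /* 01 23 45 67 89 ab cd ef */
        }
        printf("\n");
        return 0;
    }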
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
@@ -49,24 +49,14 @@
 #include "elf.h"
 #include "sysemu/kvm_int.h"
 
-//#define DEBUG_KVM
-
-#ifdef DEBUG_KVM
-#define DPRINTF(fmt, ...) \
-    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
 #define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
 
 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
     KVM_CAP_LAST_INFO
 };
 
-static int cap_interrupt_unset = false;
-static int cap_interrupt_level = false;
+static int cap_interrupt_unset;
+static int cap_interrupt_level;
 static int cap_segstate;
 static int cap_booke_sregs;
 static int cap_ppc_smt;
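The deleted DPRINTF block above is what the "target/ppc/kvm: Convert DPRINTF to traces" patch in this queue replaces. The mechanical shape of each conversion is roughly the following; the event name and call site are invented here for illustration (the real patch adds its own entries to target/ppc/trace-events):

    /* before: compiled out unless rebuilt with DEBUG_KVM defined */
    DPRINTF("injected interrupt %d\n", irq);

    /* after: declared once in target/ppc/trace-events, e.g.
     *     kvm_injected_interrupt(int irq) "injected interrupt %d"
     * and emitted through the generated helper, selectable at runtime: */
    trace_kvm_injected_interrupt(irq);

The practical gain is that trace points stay compiled in and can be enabled per-event at runtime, instead of requiring a rebuild with a debug macro.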
@@ -96,7 +86,8 @@ static int cap_large_decr;
 
 static uint32_t debug_inst_opcode;
 
-/* XXX We have a race condition where we actually have a level triggered
+/*
+ * XXX We have a race condition where we actually have a level triggered
  * interrupt, but the infrastructure can't expose that yet, so the guest
  * takes but ignores it, goes to sleep and never gets notified that there's
  * still an interrupt pending.
@@ -114,10 +105,12 @@ static void kvm_kick_cpu(void *opaque)
     qemu_cpu_kick(CPU(cpu));
 }
 
-/* Check whether we are running with KVM-PR (instead of KVM-HV). This
+/*
+ * Check whether we are running with KVM-PR (instead of KVM-HV). This
  * should only be used for fallback tests - generally we should use
 * explicit capabilities for the features we want, rather than
- * assuming what is/isn't available depending on the KVM variant. */
+ * assuming what is/isn't available depending on the KVM variant.
+ */
 static bool kvmppc_is_pr(KVMState *ks)
 {
     /* Assume KVM-PR if the GET_PVINFO capability is available */
@@ -143,8 +136,10 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
     cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
-    /* Note: we don't set cap_papr here, because this capability is
-     * only activated after this by kvmppc_set_papr() */
+    /*
+     * Note: we don't set cap_papr here, because this capability is
+     * only activated after this by kvmppc_set_papr()
+     */
     cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
     cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
@ -160,7 +155,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
|
||||||
* in KVM at this moment.
|
* in KVM at this moment.
|
||||||
*
|
*
|
||||||
* TODO: call kvm_vm_check_extension() with the right capability
|
* TODO: call kvm_vm_check_extension() with the right capability
|
||||||
* after the kernel starts implementing it.*/
|
* after the kernel starts implementing it.
|
||||||
|
*/
|
||||||
cap_ppc_pvr_compat = false;
|
cap_ppc_pvr_compat = false;
|
||||||
|
|
||||||
if (!cap_interrupt_level) {
|
if (!cap_interrupt_level) {
|
||||||
|
@ -186,10 +182,13 @@ static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
|
if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
|
||||||
/* What we're really trying to say is "if we're on BookE, we use
|
/*
|
||||||
the native PVR for now". This is the only sane way to check
|
* What we're really trying to say is "if we're on BookE, we
|
||||||
it though, so we potentially confuse users that they can run
|
* use the native PVR for now". This is the only sane way to
|
||||||
BookE guests on BookS. Let's hope nobody dares enough :) */
|
* check it though, so we potentially confuse users that they
|
||||||
|
* can run BookE guests on BookS. Let's hope nobody dares
|
||||||
|
* enough :)
|
||||||
|
*/
|
||||||
return 0;
|
return 0;
|
||||||
} else {
|
} else {
|
||||||
if (!cap_segstate) {
|
if (!cap_segstate) {
|
@@ -421,12 +420,14 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
     }
 
     if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
-        /* Mostly what guest pagesizes we can use are related to the
+        /*
+         * Mostly what guest pagesizes we can use are related to the
          * host pages used to map guest RAM, which is handled in the
         * platform code. Cache-Inhibited largepages (64k) however are
          * used for I/O, so if they're mapped to the host at all it
          * will be a normal mapping, not a special hugepage one used
-         * for RAM. */
+         * for RAM.
+         */
        if (getpagesize() < 0x10000) {
             error_setg(errp,
                        "KVM can't supply 64kiB CI pages, which guest expects");
@@ -440,9 +441,9 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
     return POWERPC_CPU(cpu)->vcpu_id;
 }
 
-/* e500 supports 2 h/w breakpoint and 2 watchpoint.
- * book3s supports only 1 watchpoint, so array size
- * of 4 is sufficient for now.
+/*
+ * e500 supports 2 h/w breakpoint and 2 watchpoint. book3s supports
+ * only 1 watchpoint, so array size of 4 is sufficient for now.
  */
 #define MAX_HW_BKPTS 4
 
@@ -497,9 +498,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
         break;
     case POWERPC_MMU_2_07:
         if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
-            /* KVM-HV has transactional memory on POWER8 also without the
-             * KVM_CAP_PPC_HTM extension, so enable it here instead as
-             * long as it's availble to userspace on the host. */
+            /*
+             * KVM-HV has transactional memory on POWER8 also without
+             * the KVM_CAP_PPC_HTM extension, so enable it here
+             * instead as long as it's availble to userspace on the
+             * host.
+             */
             if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                 cap_htm = true;
             }
@@ -626,7 +630,7 @@ static int kvm_put_fp(CPUState *cs)
         reg.addr = (uintptr_t)&fpscr;
         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
         if (ret < 0) {
-            DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
+            trace_kvm_failed_fpscr_set(strerror(errno));
             return ret;
         }
 
@@ -647,8 +651,8 @@ static int kvm_put_fp(CPUState *cs)
 
             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
             if (ret < 0) {
-                DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
-                        i, strerror(errno));
+                trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
+                                        strerror(errno));
                 return ret;
             }
         }
@@ -659,7 +663,7 @@ static int kvm_put_fp(CPUState *cs)
         reg.addr = (uintptr_t)&env->vscr;
         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
         if (ret < 0) {
-            DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
+            trace_kvm_failed_vscr_set(strerror(errno));
             return ret;
         }
 
@@ -668,7 +672,7 @@ static int kvm_put_fp(CPUState *cs)
             reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
             ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
             if (ret < 0) {
-                DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
+                trace_kvm_failed_vr_set(i, strerror(errno));
                 return ret;
             }
         }
@@ -693,7 +697,7 @@ static int kvm_get_fp(CPUState *cs)
         reg.addr = (uintptr_t)&fpscr;
         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
         if (ret < 0) {
-            DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
+            trace_kvm_failed_fpscr_get(strerror(errno));
             return ret;
         } else {
             env->fpscr = fpscr;
@@ -709,8 +713,8 @@ static int kvm_get_fp(CPUState *cs)
 
             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
             if (ret < 0) {
-                DPRINTF("Unable to get %s%d from KVM: %s\n",
-                        vsx ? "VSR" : "FPR", i, strerror(errno));
+                trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
+                                        strerror(errno));
                 return ret;
             } else {
 #ifdef HOST_WORDS_BIGENDIAN
@@ -733,7 +737,7 @@ static int kvm_get_fp(CPUState *cs)
         reg.addr = (uintptr_t)&env->vscr;
         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
         if (ret < 0) {
-            DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
+            trace_kvm_failed_vscr_get(strerror(errno));
             return ret;
         }
 
@@ -742,8 +746,7 @@ static int kvm_get_fp(CPUState *cs)
             reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
             ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
             if (ret < 0) {
-                DPRINTF("Unable to get VR%d from KVM: %s\n",
-                        i, strerror(errno));
+                trace_kvm_failed_vr_get(i, strerror(errno));
                 return ret;
             }
         }
@@ -764,7 +767,7 @@ static int kvm_get_vpa(CPUState *cs)
     reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
     if (ret < 0) {
-        DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
+        trace_kvm_failed_vpa_addr_get(strerror(errno));
         return ret;
     }
 
@@ -774,8 +777,7 @@ static int kvm_get_vpa(CPUState *cs)
     reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
     if (ret < 0) {
-        DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
-                strerror(errno));
+        trace_kvm_failed_slb_get(strerror(errno));
         return ret;
     }
 
@@ -785,8 +787,7 @@ static int kvm_get_vpa(CPUState *cs)
     reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
     if (ret < 0) {
-        DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
-                strerror(errno));
+        trace_kvm_failed_dtl_get(strerror(errno));
         return ret;
     }
 
@@ -800,10 +801,12 @@ static int kvm_put_vpa(CPUState *cs)
     struct kvm_one_reg reg;
     int ret;
 
-    /* SLB shadow or DTL can't be registered unless a master VPA is
+    /*
+     * SLB shadow or DTL can't be registered unless a master VPA is
      * registered.  That means when restoring state, if a VPA *is*
      * registered, we need to set that up first.  If not, we need to
-     * deregister the others before deregistering the master VPA */
+     * deregister the others before deregistering the master VPA
+     */
     assert(spapr_cpu->vpa_addr
            || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
 
@@ -812,7 +815,7 @@ static int kvm_put_vpa(CPUState *cs)
         reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
         if (ret < 0) {
-            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
+            trace_kvm_failed_vpa_addr_set(strerror(errno));
             return ret;
         }
     }
@@ -823,7 +826,7 @@ static int kvm_put_vpa(CPUState *cs)
     reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
     if (ret < 0) {
-        DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
+        trace_kvm_failed_slb_set(strerror(errno));
         return ret;
     }
 
@@ -833,8 +836,7 @@ static int kvm_put_vpa(CPUState *cs)
         reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
         if (ret < 0) {
-            DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
-                    strerror(errno));
+            trace_kvm_failed_dtl_set(strerror(errno));
             return ret;
         }
 
@@ -843,7 +845,7 @@ static int kvm_put_vpa(CPUState *cs)
         reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
         ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
         if (ret < 0) {
-            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
+            trace_kvm_failed_null_vpa_addr_set(strerror(errno));
             return ret;
         }
     }
@@ -929,8 +931,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
 
     regs.pid = env->spr[SPR_BOOKE_PID];
 
-    for (i = 0;i < 32; i++)
+    for (i = 0; i < 32; i++) {
         regs.gpr[i] = env->gpr[i];
+    }
 
     regs.cr = 0;
     for (i = 0; i < 8; i++) {
@@ -938,8 +941,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
     }
 
     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
+    }
 
     kvm_put_fp(cs);
 
@@ -962,10 +966,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
     if (cap_one_reg) {
         int i;
 
-        /* We deliberately ignore errors here, for kernels which have
+        /*
+         * We deliberately ignore errors here, for kernels which have
          * the ONE_REG calls, but don't support the specific
          * registers, there's a reasonable chance things will still
-         * work, at least until we try to migrate. */
+         * work, at least until we try to migrate.
+         */
         for (i = 0; i < 1024; i++) {
             uint64_t id = env->spr_cb[i].one_reg_id;
 
@@ -996,7 +1002,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
 
         if (cap_papr) {
             if (kvm_put_vpa(cs) < 0) {
-                DPRINTF("Warning: Unable to set VPA information to KVM\n");
+                trace_kvm_failed_put_vpa();
             }
         }
@@ -1207,8 +1213,9 @@ int kvm_arch_get_registers(CPUState *cs)
     int i, ret;
 
     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
-    if (ret < 0)
+    if (ret < 0) {
         return ret;
+    }
 
     cr = regs.cr;
     for (i = 7; i >= 0; i--) {
@@ -1236,8 +1243,9 @@ int kvm_arch_get_registers(CPUState *cs)
 
     env->spr[SPR_BOOKE_PID] = regs.pid;
 
-    for (i = 0;i < 32; i++)
+    for (i = 0; i < 32; i++) {
         env->gpr[i] = regs.gpr[i];
+    }
 
     kvm_get_fp(cs);
 
@@ -1262,10 +1270,12 @@ int kvm_arch_get_registers(CPUState *cs)
     if (cap_one_reg) {
         int i;
 
-        /* We deliberately ignore errors here, for kernels which have
+        /*
+         * We deliberately ignore errors here, for kernels which have
          * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
-         * work, at least until we try to migrate. */
+         * work, at least until we try to migrate.
+         */
         for (i = 0; i < 1024; i++) {
             uint64_t id = env->spr_cb[i].one_reg_id;
 
@@ -1296,7 +1306,7 @@ int kvm_arch_get_registers(CPUState *cs)
 
     if (cap_papr) {
         if (kvm_get_vpa(cs) < 0) {
-            DPRINTF("Warning: Unable to get VPA information from KVM\n");
+            trace_kvm_failed_get_vpa();
         }
     }
@@ -1339,20 +1349,24 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
 
     qemu_mutex_lock_iothread();
 
-    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
-     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
+    /*
+     * PowerPC QEMU tracks the various core input pins (interrupt,
+     * critical interrupt, reset, etc) in PPC-specific
+     * env->irq_input_state.
+     */
     if (!cap_interrupt_level &&
         run->ready_for_interrupt_injection &&
         (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->irq_input_state & (1 << PPC_INPUT_INT)))
     {
-        /* For now KVM disregards the 'irq' argument. However, in the
-         * future KVM could cache it in-kernel to avoid a heavyweight exit
-         * when reading the UIC.
+        /*
+         * For now KVM disregards the 'irq' argument. However, in the
+         * future KVM could cache it in-kernel to avoid a heavyweight
+         * exit when reading the UIC.
          */
         irq = KVM_INTERRUPT_SET;
 
-        DPRINTF("injected interrupt %d\n", irq);
+        trace_kvm_injected_interrupt(irq);
         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
         if (r < 0) {
             printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
@@ -1363,9 +1377,12 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
                     (NANOSECONDS_PER_SECOND / 50));
     }
 
-    /* We don't know if there are more interrupts pending after this. However,
-     * the guest will return to userspace in the course of handling this one
-     * anyways, so we will get a chance to deliver the rest. */
+    /*
+     * We don't know if there are more interrupts pending after
+     * this. However, the guest will return to userspace in the course
+     * of handling this one anyways, so we will get a chance to
+     * deliver the rest.
+     */
 
     qemu_mutex_unlock_iothread();
 }
@@ -1394,18 +1411,22 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
 }
 
 /* map dcr access to existing qemu dcr emulation */
-static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
+static int kvmppc_handle_dcr_read(CPUPPCState *env,
+                                  uint32_t dcrn, uint32_t *data)
 {
-    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
+    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
+    }
 
     return 0;
 }
 
-static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
+static int kvmppc_handle_dcr_write(CPUPPCState *env,
+                                   uint32_t dcrn, uint32_t data)
 {
-    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
+    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
+    }
 
     return 0;
 }
@@ -1697,20 +1718,20 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
     switch (run->exit_reason) {
     case KVM_EXIT_DCR:
         if (run->dcr.is_write) {
-            DPRINTF("handle dcr write\n");
+            trace_kvm_handle_dcr_write();
             ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
         } else {
-            DPRINTF("handle dcr read\n");
+            trace_kvm_handle_drc_read();
             ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
         }
         break;
     case KVM_EXIT_HLT:
-        DPRINTF("handle halt\n");
+        trace_kvm_handle_halt();
         ret = kvmppc_handle_halt(cpu);
         break;
 #if defined(TARGET_PPC64)
     case KVM_EXIT_PAPR_HCALL:
-        DPRINTF("handle PAPR hypercall\n");
+        trace_kvm_handle_papr_hcall();
         run->papr_hcall.ret = spapr_hypercall(cpu,
                                               run->papr_hcall.nr,
                                               run->papr_hcall.args);
@@ -1718,18 +1739,18 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         break;
 #endif
     case KVM_EXIT_EPR:
-        DPRINTF("handle epr\n");
+        trace_kvm_handle_epr();
         run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
         ret = 0;
         break;
     case KVM_EXIT_WATCHDOG:
-        DPRINTF("handle watchdog expiry\n");
+        trace_kvm_handle_watchdog_expiry();
         watchdog_perform_action();
         ret = 0;
         break;
 
     case KVM_EXIT_DEBUG:
-        DPRINTF("handle debug exception\n");
+        trace_kvm_handle_debug_exception();
         if (kvm_handle_debug(cpu, run)) {
             ret = EXCP_DEBUG;
             break;
@@ -1849,7 +1870,8 @@ uint32_t kvmppc_get_tbfreq(void)
         return retval;
     }
 
-    if (!(ns = strchr(line, ':'))) {
+    ns = strchr(line, ':');
+    if (!ns) {
         return retval;
     }
 
@@ -1875,7 +1897,8 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len)
     struct dirent *dirp;
     DIR *dp;
 
-    if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
+    dp = opendir(PROC_DEVTREE_CPU);
+    if (!dp) {
         printf("Can't open directory " PROC_DEVTREE_CPU "\n");
         return -1;
     }
@@ -1929,10 +1952,11 @@ static uint64_t kvmppc_read_int_dt(const char *filename)
     return 0;
 }
 
-/* Read a CPU node property from the host device tree that's a single
+/*
+ * Read a CPU node property from the host device tree that's a single
  * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
- * (can't find or open the property, or doesn't understand the
- * format) */
+ * (can't find or open the property, or doesn't understand the format)
+ */
 static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
 {
     char buf[PATH_MAX], *tmp;
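kvmppc_read_int_cpu_dt() combines kvmppc_find_cpu_dt() with kvmppc_read_int_dt() so a caller can fetch one integer property of the host's CPU device-tree node without caring whether the cell is 32-bit or 64-bit. A hypothetical caller in the style of the helpers in this file (the wrapper name is made up for illustration; "clock-frequency" is a standard device-tree property):

    /* Sketch only: returns 0 if the node or property can't be read */
    static uint32_t host_cpu_clockfreq(void)
    {
        return kvmppc_read_int_cpu_dt("clock-frequency");
    }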
@@ -2064,8 +2088,10 @@ void kvmppc_set_papr(PowerPCCPU *cpu)
         exit(1);
     }
 
-    /* Update the capability flag so we sync the right information
-     * with kvm */
+    /*
+     * Update the capability flag so we sync the right information
+     * with kvm
+     */
     cap_papr = 1;
 }
 
@@ -2133,8 +2159,10 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
     long rampagesize, best_page_shift;
     int i;
 
-    /* Find the largest hardware supported page size that's less than
-     * or equal to the (logical) backing page size of guest RAM */
+    /*
+     * Find the largest hardware supported page size that's less than
+     * or equal to the (logical) backing page size of guest RAM
+     */
     kvm_get_smmu_info(&info, &error_fatal);
     rampagesize = qemu_minrampagesize();
     best_page_shift = 0;
@@ -2184,7 +2212,8 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
     int fd;
     void *table;
 
-    /* Must set fd to -1 so we don't try to munmap when called for
+    /*
+     * Must set fd to -1 so we don't try to munmap when called for
      * destroying the table, which the upper layers -will- do
      */
     *pfd = -1;
@@ -2272,10 +2301,12 @@ int kvmppc_reset_htab(int shift_hint)
         int ret;
         ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
         if (ret == -ENOTTY) {
-            /* At least some versions of PR KVM advertise the
+            /*
+             * At least some versions of PR KVM advertise the
              * capability, but don't implement the ioctl().  Oops.
              * Return 0 so that we allocate the htab in qemu, as is
-             * correct for PR. */
+             * correct for PR.
+             */
             return 0;
         } else if (ret < 0) {
             return ret;
@@ -2283,9 +2314,12 @@ int kvmppc_reset_htab(int shift_hint)
         return shift;
     }
 
-    /* We have a kernel that predates the htab reset calls.  For PR
+    /*
+     * We have a kernel that predates the htab reset calls.  For PR
      * KVM, we need to allocate the htab ourselves, for an HV KVM of
-     * this era, it has allocated a 16MB fixed size hash table already. */
+     * this era, it has allocated a 16MB fixed size hash table
+     * already.
+     */
     if (kvmppc_is_pr(kvm_state)) {
         /* PR - tell caller to allocate htab */
         return 0;
@@ -2667,8 +2701,8 @@ int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
             }
         }
     } while ((rc != 0)
-             && ((max_ns < 0)
-                 || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
+             && ((max_ns < 0) ||
+                 ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
 
     return (rc == 0) ? 1 : 0;
 }
@@ -117,7 +117,8 @@ static inline int kvmppc_get_hasidle(CPUPPCState *env)
     return 0;
 }
 
-static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
+static inline int kvmppc_get_hypercall(CPUPPCState *env,
+                                       uint8_t *buf, int buf_len)
 {
     return -1;
 }
@@ -24,22 +24,26 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
 #endif
     target_ulong xer;
 
-    for (i = 0; i < 32; i++)
+    for (i = 0; i < 32; i++) {
         qemu_get_betls(f, &env->gpr[i]);
+    }
 #if !defined(TARGET_PPC64)
-    for (i = 0; i < 32; i++)
+    for (i = 0; i < 32; i++) {
         qemu_get_betls(f, &env->gprh[i]);
+    }
 #endif
     qemu_get_betls(f, &env->lr);
     qemu_get_betls(f, &env->ctr);
-    for (i = 0; i < 8; i++)
+    for (i = 0; i < 8; i++) {
         qemu_get_be32s(f, &env->crf[i]);
+    }
     qemu_get_betls(f, &xer);
     cpu_write_xer(env, xer);
     qemu_get_betls(f, &env->reserve_addr);
     qemu_get_betls(f, &env->msr);
-    for (i = 0; i < 4; i++)
+    for (i = 0; i < 4; i++) {
         qemu_get_betls(f, &env->tgpr[i]);
+    }
     for (i = 0; i < 32; i++) {
         union {
             float64 d;
@@ -56,14 +60,19 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
     qemu_get_sbe32s(f, &slb_nr);
 #endif
     qemu_get_betls(f, &sdr1);
-    for (i = 0; i < 32; i++)
+    for (i = 0; i < 32; i++) {
         qemu_get_betls(f, &env->sr[i]);
-    for (i = 0; i < 2; i++)
-        for (j = 0; j < 8; j++)
-            qemu_get_betls(f, &env->DBAT[i][j]);
-    for (i = 0; i < 2; i++)
-        for (j = 0; j < 8; j++)
-            qemu_get_betls(f, &env->IBAT[i][j]);
+    }
+    for (i = 0; i < 2; i++) {
+        for (j = 0; j < 8; j++) {
+            qemu_get_betls(f, &env->DBAT[i][j]);
+        }
+    }
+    for (i = 0; i < 2; i++) {
+        for (j = 0; j < 8; j++) {
+            qemu_get_betls(f, &env->IBAT[i][j]);
+        }
+    }
     qemu_get_sbe32s(f, &env->nb_tlb);
     qemu_get_sbe32s(f, &env->tlb_per_way);
     qemu_get_sbe32s(f, &env->nb_ways);
@@ -71,17 +80,19 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
     qemu_get_sbe32s(f, &env->id_tlbs);
     qemu_get_sbe32s(f, &env->nb_pids);
     if (env->tlb.tlb6) {
-        // XXX assumes 6xx
+        /* XXX assumes 6xx */
         for (i = 0; i < env->nb_tlb; i++) {
             qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
             qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
             qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
         }
     }
-    for (i = 0; i < 4; i++)
+    for (i = 0; i < 4; i++) {
         qemu_get_betls(f, &env->pb[i]);
-    for (i = 0; i < 1024; i++)
+    }
+    for (i = 0; i < 1024; i++) {
         qemu_get_betls(f, &env->spr[i]);
+    }
     if (!cpu->vhyp) {
         ppc_store_sdr1(env, sdr1);
     }
@@ -94,8 +105,9 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
     qemu_get_sbe32s(f, &env->error_code);
     qemu_get_be32s(f, &env->pending_interrupts);
     qemu_get_be32s(f, &env->irq_input_state);
-    for (i = 0; i < POWERPC_EXCP_NB; i++)
+    for (i = 0; i < POWERPC_EXCP_NB; i++) {
         qemu_get_betls(f, &env->excp_vectors[i]);
+    }
     qemu_get_betls(f, &env->excp_prefix);
     qemu_get_betls(f, &env->ivor_mask);
     qemu_get_betls(f, &env->ivpr_mask);
@@ -267,8 +279,10 @@ static int cpu_pre_save(void *opaque)
 
     /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
     if (cpu->pre_2_8_migration) {
-        /* Mask out bits that got added to msr_mask since the versions
-         * which stupidly included it in the migration stream. */
+        /*
+         * Mask out bits that got added to msr_mask since the versions
+         * which stupidly included it in the migration stream.
+         */
         target_ulong metamask = 0
 #if defined(TARGET_PPC64)
             | (1ULL << MSR_TS0)
@@ -277,9 +291,10 @@ static int cpu_pre_save(void *opaque)
             ;
         cpu->mig_msr_mask = env->msr_mask & ~metamask;
         cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
-        /* CPU models supported by old machines all have PPC_MEM_TLBIE,
-         * so we set it unconditionally to allow backward migration from
-         * a POWER9 host to a POWER8 host.
+        /*
+         * CPU models supported by old machines all have
+         * PPC_MEM_TLBIE, so we set it unconditionally to allow
+         * backward migration from a POWER9 host to a POWER8 host.
          */
         cpu->mig_insns_flags |= PPC_MEM_TLBIE;
         cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
@@ -395,7 +410,10 @@ static int cpu_post_load(void *opaque, int version_id)
         ppc_store_sdr1(env, env->spr[SPR_SDR1]);
     }
 
-    /* Invalidate all supported msr bits except MSR_TGPR/MSR_HVB before restoring */
+    /*
+     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
+     * before restoring
+     */
     msr = env->msr;
     env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
     ppc_store_msr(env, msr);
@@ -409,7 +427,7 @@ static bool fpu_needed(void *opaque)
 {
     PowerPCCPU *cpu = opaque;
 
-    return (cpu->env.insns_flags & PPC_FLOAT);
+    return cpu->env.insns_flags & PPC_FLOAT;
 }
 
 static const VMStateDescription vmstate_fpu = {
@@ -428,7 +446,7 @@ static bool altivec_needed(void *opaque)
 {
     PowerPCCPU *cpu = opaque;
 
-    return (cpu->env.insns_flags & PPC_ALTIVEC);
+    return cpu->env.insns_flags & PPC_ALTIVEC;
 }
 
 static int get_vscr(QEMUFile *f, void *opaque, size_t size,
@@ -483,7 +501,7 @@ static bool vsx_needed(void *opaque)
 {
     PowerPCCPU *cpu = opaque;
 
-    return (cpu->env.insns_flags2 & PPC2_VSX);
+    return cpu->env.insns_flags2 & PPC2_VSX;
 }
 
 static const VMStateDescription vmstate_vsx = {
@@ -591,7 +609,7 @@ static bool slb_needed(void *opaque)
     PowerPCCPU *cpu = opaque;
 
     /* We don't support any of the old segment table based 64-bit CPUs */
-    return (cpu->env.mmu_model & POWERPC_MMU_64);
+    return cpu->env.mmu_model & POWERPC_MMU_64;
 }
 
 static int slb_post_load(void *opaque, int version_id)
@@ -600,8 +618,10 @@ static int slb_post_load(void *opaque, int version_id)
     CPUPPCState *env = &cpu->env;
     int i;
 
-    /* We've pulled in the raw esid and vsid values from the migration
-     * stream, but we need to recompute the page size pointers */
+    /*
+     * We've pulled in the raw esid and vsid values from the migration
+     * stream, but we need to recompute the page size pointers
+     */
     for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
         if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
             /* Migration source had bad values in its SLB */
@@ -27,7 +27,7 @@
 #include "internal.h"
 #include "qemu/atomic128.h"
 
-//#define DEBUG_OP
+/* #define DEBUG_OP */
 
 static inline bool needs_byteswap(const CPUPPCState *env)
 {
@@ -103,10 +103,11 @@ void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
     do_lsw(env, addr, nb, reg, GETPC());
 }
 
-/* PPC32 specification says we must generate an exception if
- * rA is in the range of registers to be loaded.
- * In an other hand, IBM says this is valid, but rA won't be loaded.
- * For now, I'll follow the spec...
+/*
+ * PPC32 specification says we must generate an exception if rA is in
+ * the range of registers to be loaded. In an other hand, IBM says
+ * this is valid, but rA won't be loaded. For now, I'll follow the
+ * spec...
  */
 void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                  uint32_t ra, uint32_t rb)
@@ -199,7 +200,8 @@ void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
 void helper_icbi(CPUPPCState *env, target_ulong addr)
 {
     addr &= ~(env->dcache_line_size - 1);
-    /* Invalidate one cache line :
+    /*
+     * Invalidate one cache line :
      * PowerPC specification says this is to be treated like a load
      * (not a fetch) by the MMU. To be sure it will be so,
      * do the load "by hand".
@@ -346,10 +348,12 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
 #define LO_IDX 0
 #endif
 
-/* We use msr_le to determine index ordering in a vector.  However,
-   byteswapping is not simply controlled by msr_le.  We also need to take
-   into account endianness of the target.  This is done for the little-endian
-   PPC64 user-mode target. */
+/*
+ * We use msr_le to determine index ordering in a vector.  However,
+ * byteswapping is not simply controlled by msr_le.  We also need to
+ * take into account endianness of the target.  This is done for the
+ * little-endian PPC64 user-mode target.
+ */
 
 #define LVE(name, access, swap, element) \
     void helper_##name(CPUPPCState *env, ppc_avr_t *r, \
@@ -476,12 +480,13 @@ VSX_STXVL(stxvll, 1)
 
 void helper_tbegin(CPUPPCState *env)
 {
-    /* As a degenerate implementation, always fail tbegin.  The reason
+    /*
+     * As a degenerate implementation, always fail tbegin.  The reason
      * given is "Nesting overflow".  The "persistent" bit is set,
      * providing a hint to the error handler to not retry.  The TFIAR
      * captures the address of the failure, which is this tbegin
-     * instruction.  Instruction execution will continue with the
-     * next instruction in memory, which is precisely what we want.
+     * instruction.  Instruction execution will continue with the next
+     * instruction in memory, which is precisely what we want.
      */
 
     env->spr[SPR_TEXASR] =
@@ -1,5 +1,4 @@
-static const uint8_t mfrom_ROM_table[602] =
-{
+static const uint8_t mfrom_ROM_table[602] = {
     77,  77,  76,  76,  75,  75,  74,  74,
     73,  73,  72,  72,  71,  71,  70,  70,
     69,  69,  68,  68,  68,  67,  67,  66,
@@ -10,7 +10,8 @@ int main (void)
 
     printf("static const uint8_t mfrom_ROM_table[602] =\n{\n    ");
     for (i = 0; i < 602; i++) {
-        /* Extremely decomposed:
+        /*
+         * Extremely decomposed:
          *                    -T0 / 256
          *  T0 = 256 * log10(10          + 1.0) + 0.5
          */
@@ -23,9 +24,10 @@ int main (void)
         d += 0.5;
         n = d;
         printf("%3d, ", n);
-        if ((i & 7) == 7)
+        if ((i & 7) == 7) {
             printf("\n    ");
+        }
     }
     printf("\n};\n");
 
     return 0;
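The generator's decomposed formula can be checked by hand against the start of the table: for i = 0 it gives 256 * log10(10^0 + 1.0) + 0.5 = 256 * log10(2) + 0.5, roughly 77.56, which the assignment n = d truncates to 77, the first table entry. A minimal standalone check of the first few entries (not part of the tree; link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        int i;

        /* Recompute the first four ROM entries; expected output: 77 77 76 76 */
        for (i = 0; i < 4; i++) {
            double d = 256.0 * log10(pow(10.0, -i / 256.0) + 1.0) + 0.5;
            printf("%d ", (int)d);
        }
        printf("\n");
        return 0;
    }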
@@ -210,10 +210,11 @@ void ppc_store_msr(CPUPPCState *env, target_ulong value)
     hreg_store_msr(env, value, 0);
 }
 
-/* This code is lifted from MacOnLinux. It is called whenever
- * THRM1,2 or 3 is read an fixes up the values in such a way
- * that will make MacOS not hang. These registers exist on some
- * 75x and 74xx processors.
+/*
+ * This code is lifted from MacOnLinux. It is called whenever THRM1,2
+ * or 3 is read an fixes up the values in such a way that will make
+ * MacOS not hang. These registers exist on some 75x and 74xx
+ * processors.
  */
 void helper_fixup_thrm(CPUPPCState *env)
 {
@@ -27,7 +27,7 @@
 #include "mmu-hash32.h"
 #include "exec/log.h"
 
-//#define DEBUG_BAT
+/* #define DEBUG_BAT */
 
 #ifdef DEBUG_BATS
 #  define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
@@ -228,8 +228,10 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
     qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
 
     if ((sr & 0x1FF00000) >> 20 == 0x07f) {
-        /* Memory-forced I/O controller interface access */
-        /* If T=1 and BUID=x'07F', the 601 performs a memory access
+        /*
+         * Memory-forced I/O controller interface access
+         *
+         * If T=1 and BUID=x'07F', the 601 performs a memory access
          * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
          */
         *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
@@ -265,9 +267,11 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
         }
         return 1;
     case ACCESS_CACHE:
-        /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
-        /* Should make the instruction do no-op.
-         * As it already do no-op, it's quite easy :-)
+        /*
+         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+         *
+         * Should make the instruction do no-op. As it already do
+         * no-op, it's quite easy :-)
          */
         *raddr = eaddr;
         return 0;
@@ -341,6 +345,24 @@ static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
     return -1;
 }
 
+static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
+{
+    target_ulong base = ppc_hash32_hpt_base(cpu);
+    hwaddr offset = pte_offset + 6;
+
+    /* The HW performs a non-atomic byte update */
+    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+}
+
+static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
+{
+    target_ulong base = ppc_hash32_hpt_base(cpu);
+    hwaddr offset = pte_offset + 7;
+
+    /* The HW performs a non-atomic byte update */
+    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+}
+
 static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
                                      target_ulong sr, target_ulong eaddr,
                                      ppc_hash_pte32_t *pte)
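The two helpers added above update R and C with a single byte store instead of rewriting the whole second word of the PTE, which is what the comment means by a non-atomic byte update: in the 8-byte big-endian hash PTE, pte1 occupies bytes 4-7, so the R bit (HPTE32_R_R, 0x100) sits in byte 6 and the C bit (HPTE32_R_C, 0x80) in byte 7. Hence pte_offset + 6 with ((pte1 >> 8) & 0xff) | 0x01, and pte_offset + 7 with (pte1 & 0xff) | 0x80. A self-contained illustration of that byte arithmetic (the two masks are the real HPTE32_R_R/HPTE32_R_C values; the rest is scaffolding, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    #define HPTE32_R_R 0x00000100
    #define HPTE32_R_C 0x00000080

    int main(void)
    {
        uint32_t pte1 = 0x12345600;                   /* arbitrary pte1, R and C clear */
        uint8_t byte6 = ((pte1 >> 8) & 0xff) | 0x01;  /* what ppc_hash32_set_r stores */
        uint8_t byte7 = (pte1 & 0xff) | 0x80;         /* what ppc_hash32_set_c stores */

        /* Splicing each byte back into pte1 sets exactly R (resp. C) */
        assert(((pte1 & ~0xff00u) | ((uint32_t)byte6 << 8)) == (pte1 | HPTE32_R_R));
        assert(((pte1 & ~0xffu) | byte7) == (pte1 | HPTE32_R_C));
        return 0;
    }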
@@ -399,7 +421,6 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
     hwaddr pte_offset;
     ppc_hash_pte32_t pte;
     int prot;
-    uint32_t new_pte1;
     const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
     hwaddr raddr;
 
@@ -515,17 +536,19 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
 
     /* 8. Update PTE referenced and changed bits if necessary */
 
-    new_pte1 = pte.pte1 | HPTE32_R_R; /* set referenced bit */
-    if (rwx == 1) {
-        new_pte1 |= HPTE32_R_C; /* set changed (dirty) bit */
-    } else {
-        /* Treat the page as read-only for now, so that a later write
-         * will pass through this function again to set the C bit */
-        prot &= ~PAGE_WRITE;
-    }
-
-    if (new_pte1 != pte.pte1) {
-        ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1);
+    if (!(pte.pte1 & HPTE32_R_R)) {
+        ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
+    }
+    if (!(pte.pte1 & HPTE32_R_C)) {
+        if (rwx == 1) {
+            ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
+        } else {
+            /*
+             * Treat the page as read-only for now, so that a later write
+             * will pass through this function again to set the C bit
+             */
+            prot &= ~PAGE_WRITE;
+        }
     }
 
     /* 9. Determine the real address from the PTE */
@@ -30,7 +30,7 @@
 #include "hw/hw.h"
 #include "mmu-book3s-v3.h"
 
-//#define DEBUG_SLB
+/* #define DEBUG_SLB */
 
 #ifdef DEBUG_SLB
 #  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
@@ -58,9 +58,11 @@ static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
 
         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                     PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
-        /* We check for 1T matches on all MMUs here - if the MMU
+        /*
+         * We check for 1T matches on all MMUs here - if the MMU
          * doesn't have 1T segment support, we will have prevented 1T
-         * entries from being inserted in the slbmte code. */
+         * entries from being inserted in the slbmte code.
+         */
         if (((slb->esid == esid_256M) &&
              ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
             || ((slb->esid == esid_1T) &&
@@ -103,7 +105,8 @@ void helper_slbia(CPUPPCState *env)
 
         if (slb->esid & SLB_ESID_V) {
             slb->esid &= ~SLB_ESID_V;
-            /* XXX: given the fact that segment size is 256 MB or 1TB,
+            /*
+             * XXX: given the fact that segment size is 256 MB or 1TB,
              *      and we still don't have a tlb_flush_mask(env, n, mask)
              *      in QEMU, we just invalidate all TLBs
              */
@@ -126,7 +129,8 @@ static void __helper_slbie(CPUPPCState *env, target_ulong addr,
     if (slb->esid & SLB_ESID_V) {
         slb->esid &= ~SLB_ESID_V;
 
-        /* XXX: given the fact that segment size is 256 MB or 1TB,
+        /*
+         * XXX: given the fact that segment size is 256 MB or 1TB,
          *      and we still don't have a tlb_flush_mask(env, n, mask)
          *      in QEMU, we just invalidate all TLBs
          */
@@ -306,8 +310,10 @@ static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
 {
     CPUPPCState *env = &cpu->env;
     unsigned pp, key;
-    /* Some pp bit combinations have undefined behaviour, so default
-     * to no access in those cases */
+    /*
+     * Some pp bit combinations have undefined behaviour, so default
+     * to no access in those cases
+     */
     int prot = 0;
 
     key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
@@ -547,8 +553,9 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
             if (*pshift == 0) {
                 continue;
             }
-            /* We don't do anything with pshift yet as qemu TLB only deals
-             * with 4K pages anyway
+            /*
+             * We don't do anything with pshift yet as qemu TLB only
+             * deals with 4K pages anyway
              */
             pte->pte0 = pte0;
             pte->pte1 = pte1;
@@ -572,8 +579,10 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
     uint64_t vsid, epnmask, epn, ptem;
     const PPCHash64SegmentPageSizes *sps = slb->sps;
 
-    /* The SLB store path should prevent any bad page size encodings
-     * getting in there, so: */
+    /*
+     * The SLB store path should prevent any bad page size encodings
+     * getting in there, so:
+     */
     assert(sps);
 
     /* If ISL is set in LPCR we need to clamp the page size to 4K */
@@ -716,6 +725,39 @@ static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
 }
 
 
+static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
+{
+    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16;
+
+    if (cpu->vhyp) {
+        PPCVirtualHypervisorClass *vhc =
+            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+        vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
+        return;
+    }
+    base = ppc_hash64_hpt_base(cpu);
+
+    /* The HW performs a non-atomic byte update */
+    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+}
+
+static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
+{
+    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
+
+    if (cpu->vhyp) {
+        PPCVirtualHypervisorClass *vhc =
+            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+        vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
+        return;
+    }
+    base = ppc_hash64_hpt_base(cpu);
+
+    /* The HW performs a non-atomic byte update */
+    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+}
+
 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                 int rwx, int mmu_idx)
 {
@@ -726,23 +768,25 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
     hwaddr ptex;
     ppc_hash_pte64_t pte;
     int exec_prot, pp_prot, amr_prot, prot;
-    uint64_t new_pte1;
     const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
     hwaddr raddr;
 
     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
 
-    /* Note on LPCR usage: 970 uses HID4, but our special variant
-     * of store_spr copies relevant fields into env->spr[SPR_LPCR].
-     * Similarily we filter unimplemented bits when storing into
-     * LPCR depending on the MMU version. This code can thus just
-     * use the LPCR "as-is".
+    /*
+     * Note on LPCR usage: 970 uses HID4, but our special variant of
+     * store_spr copies relevant fields into env->spr[SPR_LPCR].
+     * Similarily we filter unimplemented bits when storing into LPCR
+     * depending on the MMU version. This code can thus just use the
+     * LPCR "as-is".
      */
 
     /* 1. Handle real mode accesses */
     if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
-        /* Translation is supposedly "off" */
-        /* In real mode the top 4 effective address bits are (mostly) ignored */
+        /*
+         * Translation is supposedly "off", but in real mode the top 4
+         * effective address bits are (mostly) ignored
+         */
         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
 
         /* In HV mode, add HRMOR if top EA bit is clear */
|
||||||
|
|
||||||
/* In HV mode, add HRMOR if top EA bit is clear */
|
/* In HV mode, add HRMOR if top EA bit is clear */
|
||||||
|
@ -871,17 +915,19 @@ skip_slb_search:
|
||||||
|
|
||||||
/* 6. Update PTE referenced and changed bits if necessary */
|
/* 6. Update PTE referenced and changed bits if necessary */
|
||||||
|
|
||||||
new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
|
if (!(pte.pte1 & HPTE64_R_R)) {
|
||||||
|
ppc_hash64_set_r(cpu, ptex, pte.pte1);
|
||||||
|
}
|
||||||
|
if (!(pte.pte1 & HPTE64_R_C)) {
|
||||||
if (rwx == 1) {
|
if (rwx == 1) {
|
||||||
new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
|
ppc_hash64_set_c(cpu, ptex, pte.pte1);
|
||||||
} else {
|
} else {
|
||||||
/* Treat the page as read-only for now, so that a later write
|
/*
|
||||||
* will pass through this function again to set the C bit */
|
* Treat the page as read-only for now, so that a later write
|
||||||
|
* will pass through this function again to set the C bit
|
||||||
|
*/
|
||||||
prot &= ~PAGE_WRITE;
|
prot &= ~PAGE_WRITE;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (new_pte1 != pte.pte1) {
|
|
||||||
ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* 7. Determine the real address from the PTE */
|
/* 7. Determine the real address from the PTE */
|
||||||
|
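The reworked fault path above now only touches guest memory when a flag is actually clear: R is set on any access, C only on a store, and a clean page is inserted read-only so that the first store faults back into this function to set C. A condensed, runnable sketch of that protocol, with illustrative bit values standing in for the HPTE64_R_* masks:

    #include <stdbool.h>
    #include <stdio.h>

    #define R_BIT 0x100   /* referenced (illustrative encoding) */
    #define C_BIT 0x080   /* changed    (illustrative encoding) */

    /* Returns the page protection after servicing one access. */
    static unsigned service_fault(unsigned *pte1, bool is_store, unsigned prot)
    {
        if (!(*pte1 & R_BIT)) {
            *pte1 |= R_BIT;       /* one-off byte store in the real code */
        }
        if (!(*pte1 & C_BIT)) {
            if (is_store) {
                *pte1 |= C_BIT;
            } else {
                prot &= ~0x2;     /* drop PAGE_WRITE: next store faults again */
            }
        }
        return prot;
    }

    int main(void)
    {
        unsigned pte1 = 0, prot;
        prot = service_fault(&pte1, false, 0x7); /* load: R set, write dropped */
        printf("after load:  pte1=0x%x prot=0x%x\n", pte1, prot);
        prot = service_fault(&pte1, true, 0x7);  /* store: C set, write kept */
        printf("after store: pte1=0x%x prot=0x%x\n", pte1, prot);
        return 0;
    }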
@@ -940,24 +986,6 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
         & TARGET_PAGE_MASK;
 }
 
-void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
-                           uint64_t pte0, uint64_t pte1)
-{
-    hwaddr base;
-    hwaddr offset = ptex * HASH_PTE_SIZE_64;
-
-    if (cpu->vhyp) {
-        PPCVirtualHypervisorClass *vhc =
-            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
-        vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
-        return;
-    }
-    base = ppc_hash64_hpt_base(cpu);
-
-    stq_phys(CPU(cpu)->as, base + offset, pte0);
-    stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
-}
-
 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong pte0, target_ulong pte1)
 {
@@ -1023,8 +1051,9 @@ static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
         return;
     }
 
-    /* Make one up. Mostly ignore the ESID which will not be
-     * needed for translation
+    /*
+     * Make one up. Mostly ignore the ESID which will not be needed
+     * for translation
      */
     vsid = SLB_VSID_VRMA;
     vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
@@ -1080,11 +1109,12 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
         }
         env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;
 
-        /* XXX We could also write LPID from HID4 here
+        /*
+         * XXX We could also write LPID from HID4 here
          * but since we don't tag any translation on it
          * it doesn't actually matter
-         */
-        /* XXX For proper emulation of 970 we also need
+         *
+         * XXX For proper emulation of 970 we also need
          * to dig HRMOR out of HID5
          */
         break;
@@ -10,8 +10,6 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
                                 int mmu_idx);
-void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
-                           uint64_t pte0, uint64_t pte1);
 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                                target_ulong pte_index,
                                target_ulong pte0, target_ulong pte1);
@@ -228,10 +228,10 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
     ppc_v3_pate_t pate;
 
     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
-    assert(ppc64_use_proc_tbl(cpu));
 
-    /* Real Mode Access */
-    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
+    /* HV or virtual hypervisor Real Mode Access */
+    if ((msr_hv || cpu->vhyp) &&
+        (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) {
         /* In real mode top 4 effective addr bits (mostly) ignored */
         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
 
@@ -241,6 +241,16 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
         return 0;
     }
 
+    /*
+     * Check UPRT (we avoid the check in real mode to deal with
+     * transitional states during kexec.
+     */
+    if (!ppc64_use_proc_tbl(cpu)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "LPCR:UPRT not set in radix mode ! LPCR="
+                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
+    }
+
     /* Virtual Mode Access - get the fully qualified address */
     if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
         ppc_radix64_raise_segi(cpu, rwx, eaddr);
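Two behavioural points in the radix hunks above: the real-mode fast path is now taken only in hypervisor (or virtual-hypervisor) state, and a clear LPCR[UPRT] in radix mode is logged as a guest error instead of tripping an assert, since a guest can legitimately pass through that state, for instance across kexec. A rough standalone sketch of the new gate, with stand-in booleans for the MSR state the real code reads from CPUPPCState:

    #include <stdbool.h>
    #include <stdio.h>

    static bool real_mode_fast_path(bool msr_hv, bool vhyp,
                                    bool is_ifetch, bool msr_ir, bool msr_dr)
    {
        bool translation_off = is_ifetch ? !msr_ir : !msr_dr;
        /* Only a hypervisor (or a virtual hypervisor setup) may take the
         * translation-off shortcut; anyone else goes through the tables. */
        return (msr_hv || vhyp) && translation_off;
    }

    int main(void)
    {
        printf("HV, IR=0:    %d\n",
               real_mode_fast_path(true, false, true, false, true));
        printf("guest, IR=0: %d\n",
               real_mode_fast_path(false, false, true, false, true));
        return 0;
    }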
@@ -33,11 +33,11 @@
 #include "mmu-book3s-v3.h"
 #include "mmu-radix64.h"
 
-//#define DEBUG_MMU
-//#define DEBUG_BATS
-//#define DEBUG_SOFTWARE_TLB
-//#define DUMP_PAGE_TABLES
-//#define FLUSH_ALL_TLBS
+/* #define DEBUG_MMU */
+/* #define DEBUG_BATS */
+/* #define DEBUG_SOFTWARE_TLB */
+/* #define DUMP_PAGE_TABLES */
+/* #define FLUSH_ALL_TLBS */
 
 #ifdef DEBUG_MMU
 #  define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
@@ -152,7 +152,8 @@ static int check_prot(int prot, int rw, int access_type)
 }
 
 static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
-                                       target_ulong pte1, int h, int rw, int type)
+                                       target_ulong pte1, int h,
+                                       int rw, int type)
 {
     target_ulong ptem, mmask;
     int access, ret, pteh, ptev, pp;
@@ -332,7 +333,8 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                   pte_is_valid(tlb->pte0) ? "valid" : "inval",
                   tlb->EPN, eaddr, tlb->pte1,
                   rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
-        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
+        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
+                                     0, rw, access_type)) {
         case -3:
             /* TLB inconsistency */
             return -1;
@@ -347,9 +349,11 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
             break;
         case 0:
             /* access granted */
-            /* XXX: we should go on looping to check all TLBs consistency
-             *      but we can speed-up the whole thing as the
-             *      result would be undefined if TLBs are not consistent.
+            /*
+             * XXX: we should go on looping to check all TLBs
+             *      consistency but we can speed-up the whole thing as
+             *      the result would be undefined if TLBs are not
+             *      consistent.
              */
             ret = 0;
             best = nr;
@@ -550,14 +554,18 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
         qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
         /* Direct-store segment : absolutely *BUGGY* for now */
 
-        /* Direct-store implies a 32-bit MMU.
+        /*
+         * Direct-store implies a 32-bit MMU.
          * Check the Segment Register's bus unit ID (BUID).
          */
         sr = env->sr[eaddr >> 28];
         if ((sr & 0x1FF00000) >> 20 == 0x07f) {
-            /* Memory-forced I/O controller interface access */
-            /* If T=1 and BUID=x'07F', the 601 performs a memory access
-             * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
+            /*
+             * Memory-forced I/O controller interface access
+             *
+             * If T=1 and BUID=x'07F', the 601 performs a memory
+             * access to SR[28-31] LA[4-31], bypassing all protection
+             * mechanisms.
              */
             ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
             ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@@ -578,9 +586,11 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
             /* lwarx, ldarx or srwcx. */
             return -4;
         case ACCESS_CACHE:
-            /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
-            /* Should make the instruction do no-op.
-             * As it already do no-op, it's quite easy :-)
+            /*
+             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+             *
+             * Should make the instruction do no-op. As it already do
+             * no-op, it's quite easy :-)
              */
             ctx->raddr = eaddr;
             return 0;
@@ -942,12 +952,14 @@ static uint32_t mmubooke206_esr(int mmu_idx, bool rw)
     return esr;
 }
 
-/* Get EPID register given the mmu_idx. If this is regular load,
- * construct the EPID access bits from current processor state */
-
-/* Get the effective AS and PR bits and the PID. The PID is returned only if
- * EPID load is requested, otherwise the caller must detect the correct EPID.
- * Return true if valid EPID is returned. */
+/*
+ * Get EPID register given the mmu_idx. If this is regular load,
+ * construct the EPID access bits from current processor state
+ *
+ * Get the effective AS and PR bits and the PID. The PID is returned
+ * only if EPID load is requested, otherwise the caller must detect
+ * the correct EPID. Return true if valid EPID is returned.
+ */
 static bool mmubooke206_get_as(CPUPPCState *env,
                                int mmu_idx, uint32_t *epid_out,
                                bool *as_out, bool *pr_out)
@@ -1369,8 +1381,9 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
 
     case POWERPC_MMU_SOFT_4xx_Z:
         if (unlikely(msr_pe != 0)) {
-            /* 403 family add some particular protections,
-             * using PBL/PBU registers for accesses with no translation.
+            /*
+             * 403 family add some particular protections, using
+             * PBL/PBU registers for accesses with no translation.
              */
             in_plb =
                 /* Check PLB validity */
@@ -1453,7 +1466,8 @@ static int get_physical_address_wtlb(
         if (real_mode) {
             ret = check_physical(env, ctx, eaddr, rw);
         } else {
-            cpu_abort(CPU(cpu), "PowerPC in real mode do not do any translation\n");
+            cpu_abort(CPU(cpu),
+                      "PowerPC in real mode do not do any translation\n");
         }
         return -1;
     default:
@@ -1498,9 +1512,10 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 
     if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
 
-        /* Some MMUs have separate TLBs for code and data. If we only try an
-         * ACCESS_INT, we may not be able to read instructions mapped by code
-         * TLBs, so we also try a ACCESS_CODE.
+        /*
+         * Some MMUs have separate TLBs for code and data. If we only
+         * try an ACCESS_INT, we may not be able to read instructions
+         * mapped by code TLBs, so we also try a ACCESS_CODE.
          */
         if (unlikely(get_physical_address(env, &ctx, addr, 0,
                                           ACCESS_CODE) != 0)) {
@@ -1805,6 +1820,13 @@ static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
 
     base = BATu & ~0x0001FFFF;
     end = base + mask + 0x00020000;
+    if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
+        /* Flushing 1024 4K pages is slower than a complete flush */
+        LOG_BATS("Flush all BATs\n");
+        tlb_flush(CPU(cs));
+        LOG_BATS("Flush done\n");
+        return;
+    }
     LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
              TARGET_FMT_lx ")\n", base, end, mask);
     for (page = base; page != end; page += TARGET_PAGE_SIZE) {
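The early exit just added to do_invalidate_BAT() is the "improve performance of large BAT invalidations" change from the cover letter: once a BAT maps more than 1024 target pages (4 MiB at 4 KiB per page), one global flush beats walking the range page by page. A standalone sketch of the cutoff arithmetic, assuming the usual TARGET_PAGE_BITS of 12:

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define FLUSH_THRESHOLD  1024   /* pages; beyond this, flush everything */

    static const char *pick_strategy(uint32_t base, uint32_t end)
    {
        uint32_t pages = (end - base) >> TARGET_PAGE_BITS;
        return pages > FLUSH_THRESHOLD ? "full TLB flush" : "per-page flush";
    }

    int main(void)
    {
        /* A 128 KiB BAT is 32 pages; a 256 MiB BAT is 65536 pages. */
        printf("128 KiB BAT: %s\n", pick_strategy(0, 128 * 1024));
        printf("256 MiB BAT: %s\n", pick_strategy(0, 256 * 1024 * 1024));
        return 0;
    }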
@@ -1834,8 +1856,9 @@ void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
 #if !defined(FLUSH_ALL_TLBS)
             do_invalidate_BAT(env, env->IBAT[0][nr], mask);
 #endif
-            /* When storing valid upper BAT, mask BEPI and BRPN
-             * and invalidate all TLBs covered by this BAT
+            /*
+             * When storing valid upper BAT, mask BEPI and BRPN and
+             * invalidate all TLBs covered by this BAT
              */
             mask = (value << 15) & 0x0FFE0000UL;
             env->IBAT[0][nr] = (value & 0x00001FFFUL) |
@@ -1865,8 +1888,9 @@ void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
 
     dump_store_bat(env, 'D', 0, nr, value);
     if (env->DBAT[0][nr] != value) {
-        /* When storing valid upper BAT, mask BEPI and BRPN
-         * and invalidate all TLBs covered by this BAT
+        /*
+         * When storing valid upper BAT, mask BEPI and BRPN and
+         * invalidate all TLBs covered by this BAT
          */
         mask = (value << 15) & 0x0FFE0000UL;
 #if !defined(FLUSH_ALL_TLBS)
@@ -1913,8 +1937,9 @@ void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
             do_inval = 1;
 #endif
         }
-        /* When storing valid upper BAT, mask BEPI and BRPN
-         * and invalidate all TLBs covered by this BAT
+        /*
+         * When storing valid upper BAT, mask BEPI and BRPN and
+         * invalidate all TLBs covered by this BAT
          */
         env->IBAT[0][nr] = (value & 0x00001FFFUL) |
             (value & ~0x0001FFFFUL & ~mask);
@@ -2027,7 +2052,8 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
 #if defined(TARGET_PPC64)
     if (env->mmu_model & POWERPC_MMU_64) {
         /* tlbie invalidate TLBs for all segments */
-        /* XXX: given the fact that there are too many segments to invalidate,
+        /*
+         * XXX: given the fact that there are too many segments to invalidate,
          * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
          * we just invalidate all TLBs
          */
@@ -2044,10 +2070,11 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
         break;
     case POWERPC_MMU_32B:
     case POWERPC_MMU_601:
-        /* Actual CPUs invalidate entire congruence classes based on the
-         * geometry of their TLBs and some OSes take that into account,
-         * we just mark the TLB to be flushed later (context synchronizing
-         * event or sync instruction on 32-bit).
+        /*
+         * Actual CPUs invalidate entire congruence classes based on
+         * the geometry of their TLBs and some OSes take that into
+         * account, we just mark the TLB to be flushed later (context
+         * synchronizing event or sync instruction on 32-bit).
          */
         env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
         break;
@@ -2152,8 +2179,10 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
 #endif
     if (env->sr[srnum] != value) {
         env->sr[srnum] = value;
-/* Invalidating 256MB of virtual memory in 4kB pages is way longer than
-   flusing the whole TLB. */
+        /*
+         * Invalidating 256MB of virtual memory in 4kB pages is way
+         * longer than flusing the whole TLB.
+         */
 #if !defined(FLUSH_ALL_TLBS) && 0
         {
             target_ulong page, end;
@@ -2264,10 +2293,12 @@ target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
     int nb_BATs;
     target_ulong ret = 0;
 
-    /* We don't have to generate many instances of this instruction,
+    /*
+     * We don't have to generate many instances of this instruction,
      * as rac is supervisor only.
+     *
+     * XXX: FIX THIS: Pretend we have no BAT
      */
-    /* XXX: FIX THIS: Pretend we have no BAT */
     nb_BATs = env->nb_BATs;
     env->nb_BATs = 0;
     if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
@@ -2422,7 +2453,8 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
     }
     tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                        & PPC4XX_TLBHI_SIZE_MASK);
-    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
+    /*
+     * We cannot handle TLB size < TARGET_PAGE_SIZE.
      * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
      */
     if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
@@ -2742,7 +2774,8 @@ void helper_booke206_tlbwe(CPUPPCState *env)
     }
 
     if (tlb->mas1 & MAS1_VALID) {
-        /* Invalidate the page in QEMU TLB if it was a valid entry.
+        /*
+         * Invalidate the page in QEMU TLB if it was a valid entry.
          *
          * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
@@ -2751,7 +2784,8 @@ void helper_booke206_tlbwe(CPUPPCState *env)
          * "Note that when an L2 TLB entry is written, it may be displacing an
          * already valid entry in the same L2 TLB location (a victim). If a
          * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
-         * TLB entry is automatically invalidated." */
+         * TLB entry is automatically invalidated."
+         */
         flush_page(env, tlb);
     }
 
@@ -2777,8 +2811,9 @@ void helper_booke206_tlbwe(CPUPPCState *env)
     mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
 
     if (!msr_cm) {
-        /* Executing a tlbwe instruction in 32-bit mode will set
-         * bits 0:31 of the TLB EPN field to zero.
+        /*
+         * Executing a tlbwe instruction in 32-bit mode will set bits
+         * 0:31 of the TLB EPN field to zero.
          */
         mask &= 0xffffffff;
     }
@@ -3022,10 +3057,13 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
 
 /*****************************************************************************/
 
-/* try to fill the TLB and return an exception if error. If retaddr is
-   NULL, it means that the function was called in C code (i.e. not
-   from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
+/*
+ * try to fill the TLB and return an exception if error. If retaddr is
+ * NULL, it means that the function was called in C code (i.e. not
+ * from generated code or from helper.c)
+ *
+ * XXX: fix it to restore all registers
+ */
 void tlb_fill(CPUState *cs, target_ulong addr, int size,
               MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
@@ -34,8 +34,9 @@ static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
     int i;
 
     u = 0;
-    for (i = 0; i < 8; i++)
+    for (i = 0; i < 8; i++) {
         u |= env->crf[i] << (32 - (4 * (i + 1)));
+    }
 
     return u;
 }
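Beyond the brace fix, it may help to see what the monitor loop computes: the eight 4-bit env->crf[] fields are packed into one 32-bit CR image, field i landing at bits 31-4i down to 28-4i, which is exactly the (32 - (4 * (i + 1))) shift. A standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t crf[8] = { 0x8, 0x4, 0x2, 0x1, 0xF, 0x0, 0x3, 0x5 };
        uint32_t cr = 0;

        for (int i = 0; i < 8; i++) {
            /* CR0 occupies bits 31..28, CR1 bits 27..24, ..., CR7 bits 3..0 */
            cr |= crf[i] << (32 - (4 * (i + 1)));
        }
        printf("CR = 0x%08x\n", cr);   /* prints 0x8421f035 */
        return 0;
    }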
@@ -1,5 +1,30 @@
 # See docs/devel/tracing.txt for syntax documentation.
 
 # kvm.c
-kvm_failed_spr_set(int str, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
-kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
+kvm_failed_spr_set(int spr, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
+kvm_failed_spr_get(int spr, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
+kvm_failed_fpscr_set(const char *msg) "Unable to set FPSCR to KVM: %s"
+kvm_failed_fp_set(const char *fpname, int fpnum, const char *msg) "Unable to set %s%d to KVM: %s"
+kvm_failed_vscr_set(const char *msg) "Unable to set VSCR to KVM: %s"
+kvm_failed_vr_set(int vr, const char *msg) "Unable to set VR%d to KVM: %s"
+kvm_failed_fpscr_get(const char *msg) "Unable to get FPSCR from KVM: %s"
+kvm_failed_fp_get(const char *fpname, int fpnum, const char *msg) "Unable to get %s%d from KVM: %s"
+kvm_failed_vscr_get(const char *msg) "Unable to get VSCR from KVM: %s"
+kvm_failed_vr_get(int vr, const char *msg) "Unable to get VR%d from KVM: %s"
+kvm_failed_vpa_addr_get(const char *msg) "Unable to get VPA address from KVM: %s"
+kvm_failed_slb_get(const char *msg) "Unable to get SLB shadow state from KVM: %s"
+kvm_failed_dtl_get(const char *msg) "Unable to get dispatch trace log state from KVM: %s"
+kvm_failed_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
+kvm_failed_slb_set(const char *msg) "Unable to set SLB shadow state to KVM: %s"
+kvm_failed_dtl_set(const char *msg) "Unable to set dispatch trace log state to KVM: %s"
+kvm_failed_null_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
+kvm_failed_put_vpa(void) "Warning: Unable to set VPA information to KVM"
+kvm_failed_get_vpa(void) "Warning: Unable to get VPA information from KVM"
+kvm_injected_interrupt(int irq) "injected interrupt %d"
+kvm_handle_dcr_write(void) "handle dcr write"
+kvm_handle_drc_read(void) "handle dcr read"
+kvm_handle_halt(void) "handle halt"
+kvm_handle_papr_hcall(void) "handle PAPR hypercall"
+kvm_handle_epr(void) "handle epr"
+kvm_handle_watchdog_expiry(void) "handle watchdog expiry"
+kvm_handle_debug_exception(void) "handle debug exception"
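Each line in this trace-events file is compiled by QEMU's tracetool into a trace_<event>() helper that the converted kvm.c call sites invoke in place of the old DPRINTFs (the kvm.c side of the conversion is not shown in this view). A runnable approximation of one such call site, with a local stub standing in for the generated helper:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Stub standing in for the tracetool-generated trace_<event>() helper;
     * in QEMU it is emitted from the trace-events file at build time. */
    static void trace_kvm_failed_spr_set(int spr, const char *msg)
    {
        fprintf(stderr, "Warning: Unable to set SPR %d to KVM: %s\n",
                spr, msg);
    }

    int main(void)
    {
        errno = EINVAL;
        trace_kvm_failed_spr_set(287, strerror(errno)); /* 287: PVR, say */
        return 0;
    }

One advantage over DPRINTF worth noting: trace points can be enabled per event at runtime, so the no-KVM-debug build no longer decides whether these messages exist at all.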
(File diff suppressed because it is too large.)
@@ -585,11 +585,13 @@ static void gen_mcrfs(DisasContext *ctx)
     shift = 4 * nibble;
     tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
     tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
-    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
+    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
+                     0xf);
     tcg_temp_free(tmp);
     tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
     /* Only the exception bits (including FX) should be cleared if read */
-    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
+    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
+                     ~((0xF << shift) & FP_EX_CLEAR_BITS));
     /* FEX and VX need to be updated, so don't set fpscr directly */
     tmask = tcg_const_i32(1 << nibble);
     gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
@@ -872,8 +874,10 @@ static void gen_lfdp(DisasContext *ctx)
     EA = tcg_temp_new();
     gen_addr_imm_index(ctx, EA, 0);
     t0 = tcg_temp_new_i64();
-    /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
-       necessary 64-bit byteswap already. */
+    /*
+     * We only need to swap high and low halves. gen_qemu_ld64_i64
+     * does necessary 64-bit byteswap already.
+     */
     if (unlikely(ctx->le_mode)) {
         gen_qemu_ld64_i64(ctx, t0, EA);
         set_fpr(rD(ctx->opcode) + 1, t0);
@@ -904,8 +908,10 @@ static void gen_lfdpx(DisasContext *ctx)
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
     t0 = tcg_temp_new_i64();
-    /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
-       necessary 64-bit byteswap already. */
+    /*
+     * We only need to swap high and low halves. gen_qemu_ld64_i64
+     * does necessary 64-bit byteswap already.
+     */
     if (unlikely(ctx->le_mode)) {
         gen_qemu_ld64_i64(ctx, t0, EA);
         set_fpr(rD(ctx->opcode) + 1, t0);
@@ -1103,8 +1109,10 @@ static void gen_stfdp(DisasContext *ctx)
     EA = tcg_temp_new();
     t0 = tcg_temp_new_i64();
     gen_addr_imm_index(ctx, EA, 0);
-    /* We only need to swap high and low halves. gen_qemu_st64_i64 does
-       necessary 64-bit byteswap already. */
+    /*
+     * We only need to swap high and low halves. gen_qemu_st64_i64
+     * does necessary 64-bit byteswap already.
+     */
     if (unlikely(ctx->le_mode)) {
         get_fpr(t0, rD(ctx->opcode) + 1);
         gen_qemu_st64_i64(ctx, t0, EA);
@@ -1135,8 +1143,10 @@ static void gen_stfdpx(DisasContext *ctx)
     EA = tcg_temp_new();
     t0 = tcg_temp_new_i64();
     gen_addr_reg_index(ctx, EA);
-    /* We only need to swap high and low halves. gen_qemu_st64_i64 does
-       necessary 64-bit byteswap already. */
+    /*
+     * We only need to swap high and low halves. gen_qemu_st64_i64
+     * does necessary 64-bit byteswap already.
+     */
     if (unlikely(ctx->le_mode)) {
         get_fpr(t0, rD(ctx->opcode) + 1);
         gen_qemu_st64_i64(ctx, t0, EA);
@@ -1204,8 +1214,9 @@ static void gen_lfqu(DisasContext *ctx)
     gen_addr_add(ctx, t1, t0, 8);
     gen_qemu_ld64_i64(ctx, t2, t1);
     set_fpr((rd + 1) % 32, t2);
-    if (ra != 0)
+    if (ra != 0) {
         tcg_gen_mov_tl(cpu_gpr[ra], t0);
+    }
     tcg_temp_free(t0);
     tcg_temp_free(t1);
     tcg_temp_free_i64(t2);
@@ -1229,8 +1240,9 @@ static void gen_lfqux(DisasContext *ctx)
     gen_qemu_ld64_i64(ctx, t2, t1);
     set_fpr((rd + 1) % 32, t2);
     tcg_temp_free(t1);
-    if (ra != 0)
+    if (ra != 0) {
         tcg_gen_mov_tl(cpu_gpr[ra], t0);
+    }
     tcg_temp_free(t0);
     tcg_temp_free_i64(t2);
 }
@@ -18,7 +18,8 @@ static inline void gen_evmra(DisasContext *ctx)
     TCGv_i64 tmp = tcg_temp_new_i64();
 
     /* tmp := rA_lo + rA_hi << 32 */
-    tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+    tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)],
+                          cpu_gprh[rA(ctx->opcode)]);
 
     /* spe_acc := tmp */
     tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
@@ -1089,7 +1090,8 @@ static inline void gen_efsabs(DisasContext *ctx)
         gen_exception(ctx, POWERPC_EXCP_SPEU);
         return;
     }
-    tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
+    tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                    (target_long)~0x80000000LL);
 }
 static inline void gen_efsnabs(DisasContext *ctx)
 {
@@ -1097,7 +1099,8 @@ static inline void gen_efsnabs(DisasContext *ctx)
         gen_exception(ctx, POWERPC_EXCP_SPEU);
         return;
     }
-    tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+    tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                   0x80000000);
 }
 static inline void gen_efsneg(DisasContext *ctx)
 {
@@ -1105,7 +1108,8 @@ static inline void gen_efsneg(DisasContext *ctx)
         gen_exception(ctx, POWERPC_EXCP_SPEU);
         return;
     }
-    tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+    tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                    0x80000000);
 }
 
 /* Conversion */
@@ -28,8 +28,10 @@ static void glue(gen_, name)(DisasContext *ctx)                         \
     EA = tcg_temp_new();                                                \
     gen_addr_reg_index(ctx, EA);                                        \
     tcg_gen_andi_tl(EA, EA, ~0xf);                                      \
-    /* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
-       necessary 64-bit byteswap already. */                            \
+    /*                                                                  \
+     * We only need to swap high and low halves. gen_qemu_ld64_i64     \
+     * does necessary 64-bit byteswap already.                          \
+     */                                                                 \
     if (ctx->le_mode) {                                                 \
         gen_qemu_ld64_i64(ctx, avr, EA);                                \
         set_avr64(rD(ctx->opcode), avr, false);                         \
@@ -61,8 +63,10 @@ static void gen_st##name(DisasContext *ctx)                          \
     EA = tcg_temp_new();                                                \
     gen_addr_reg_index(ctx, EA);                                        \
     tcg_gen_andi_tl(EA, EA, ~0xf);                                      \
-    /* We only need to swap high and low halves. gen_qemu_st64_i64 does \
-       necessary 64-bit byteswap already. */                            \
+    /*                                                                  \
+     * We only need to swap high and low halves. gen_qemu_st64_i64     \
+     * does necessary 64-bit byteswap already.                          \
+     */                                                                 \
     if (ctx->le_mode) {                                                 \
         get_avr64(avr, rD(ctx->opcode), false);                         \
         gen_qemu_st64_i64(ctx, avr, EA);                                \
@@ -1444,7 +1444,8 @@ static void gen_##name(DisasContext *ctx)                 \
     xb = tcg_const_tl(xB(ctx->opcode));                   \
     t0 = tcg_temp_new_i32();                              \
     t1 = tcg_temp_new_i64();                              \
-    /* uimm > 15 out of bound and for                     \
+    /*                                                    \
+     * uimm > 15 out of bound and for                     \
      * uimm > 12 handle as per hardware in helper         \
      */                                                   \
     if (uimm > 15) {                                      \
@@ -41,12 +41,13 @@
 #include "fpu/softfloat.h"
 #include "qapi/qapi-commands-target.h"
 
-//#define PPC_DUMP_CPU
-//#define PPC_DEBUG_SPR
-//#define PPC_DUMP_SPR_ACCESSES
+/* #define PPC_DUMP_CPU */
+/* #define PPC_DEBUG_SPR */
+/* #define PPC_DUMP_SPR_ACCESSES */
 /* #define USE_APPLE_GDB */
 
-/* Generic callbacks:
+/*
+ * Generic callbacks:
  * do nothing but store/retrieve spr value
  */
 static void spr_load_dump_spr(int sprn)
@@ -230,13 +231,13 @@ static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
     }
 }
 
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
 static void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
 {
     gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
 }
 
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
 static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
 {
     gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
@@ -267,20 +268,20 @@ static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
     }
 }
 
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
 static void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
 {
     gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
 }
 
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
 static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
 {
     gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
 }
 
 #if defined(TARGET_PPC64)
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
 static void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
 {
     gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
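ATTRIBUTE_UNUSED is QEMU's portable spelling of the GCC attribute, so the annotation lives in one place instead of being open-coded per function. A minimal reproduction of the pattern; the macro definition below is an assumption modelled on QEMU's compiler.h, not copied from it:

    #include <stdio.h>

    #if defined(__GNUC__)
    #define ATTRIBUTE_UNUSED __attribute__((unused))
    #else
    #define ATTRIBUTE_UNUSED
    #endif

    /* Referenced only in some build configurations, so silence
     * -Wunused-function without deleting the helper. */
    ATTRIBUTE_UNUSED
    static void helper_not_wired_up_yet(void)
    {
    }

    int main(void)
    {
        puts("compiles cleanly with -Wunused-function");
        return 0;
    }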
@@ -319,12 +320,16 @@ static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
 /* IBAT0L...IBAT7L */
 static void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
 {
-    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+                  offsetof(CPUPPCState,
+                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
 }
 
 static void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
 {
-    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
+    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+                  offsetof(CPUPPCState,
+                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
 }
 
 static void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
@@ -359,12 +364,16 @@ static void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
 /* DBAT0L...DBAT7L */
 static void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
 {
-    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
+    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+                  offsetof(CPUPPCState,
+                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
 }
 
 static void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
 {
-    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
+    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+                  offsetof(CPUPPCState,
+                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
 }
 
 static void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
@@ -473,7 +482,9 @@ static void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn)
 #if !defined(CONFIG_USER_ONLY)
 static void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn)
 {
-    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+                  offsetof(CPUPPCState,
+                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
 }
 
 static void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn)
@@ -532,7 +543,8 @@ static void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
 #if !defined(CONFIG_USER_ONLY)
 static void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn)
 {
-    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
+    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+                  offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
 }
 
 static void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn)
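The re-wrapped loads above also show a detail worth noting: offsetof() is applied to an element of a two-dimensional array member, with the indices computed from the SPR number. GCC and Clang accept non-constant subscripts inside offsetof() as an extension, which is what this code relies on. A standalone sketch against a stand-in struct (the register numbers are made up for illustration, and the struct is not CPUPPCState):

    #include <stddef.h>
    #include <stdio.h>

    struct toy_env {
        long pad;
        long IBAT[2][8];   /* [upper/lower][bat number], illustrative shape */
    };

    int main(void)
    {
        int sprn = 531, SPR_IBAT0U = 528;  /* hypothetical register numbers */
        size_t off = offsetof(struct toy_env,
                              IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]);
        printf("load from env + %zu\n", off);  /* 80 on an LP64 host */
        return 0;
    }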
@ -661,14 +673,20 @@ static inline void vscr_init(CPUPPCState *env, uint32_t val)
|
||||||
|
|
||||||
static inline void _spr_register(CPUPPCState *env, int num,
|
static inline void _spr_register(CPUPPCState *env, int num,
|
||||||
const char *name,
|
const char *name,
|
||||||
void (*uea_read)(DisasContext *ctx, int gprn, int sprn),
|
void (*uea_read)(DisasContext *ctx,
|
||||||
void (*uea_write)(DisasContext *ctx, int sprn, int gprn),
|
int gprn, int sprn),
|
||||||
|
void (*uea_write)(DisasContext *ctx,
|
||||||
|
int sprn, int gprn),
|
||||||
#if !defined(CONFIG_USER_ONLY)
|
#if !defined(CONFIG_USER_ONLY)
|
||||||
|
|
||||||
void (*oea_read)(DisasContext *ctx, int gprn, int sprn),
|
void (*oea_read)(DisasContext *ctx,
|
||||||
void (*oea_write)(DisasContext *ctx, int sprn, int gprn),
|
int gprn, int sprn),
|
||||||
void (*hea_read)(DisasContext *opaque, int gprn, int sprn),
|
void (*oea_write)(DisasContext *ctx,
|
||||||
void (*hea_write)(DisasContext *opaque, int sprn, int gprn),
|
int sprn, int gprn),
|
||||||
|
void (*hea_read)(DisasContext *opaque,
|
||||||
|
int gprn, int sprn),
|
||||||
|
void (*hea_write)(DisasContext *opaque,
|
||||||
|
int sprn, int gprn),
|
||||||
#endif
|
#endif
|
||||||
#if defined(CONFIG_KVM)
|
#if defined(CONFIG_KVM)
|
||||||
uint64_t one_reg_id,
|
uint64_t one_reg_id,
|
||||||
|
@ -774,8 +792,10 @@ static void gen_spr_sdr1(CPUPPCState *env)
|
||||||
{
|
{
|
||||||
#ifndef CONFIG_USER_ONLY
|
#ifndef CONFIG_USER_ONLY
|
||||||
if (env->has_hv_mode) {
|
if (env->has_hv_mode) {
|
||||||
/* SDR1 is a hypervisor resource on CPUs which have a
|
/*
|
||||||
* hypervisor mode */
|
* SDR1 is a hypervisor resource on CPUs which have a
|
||||||
|
* hypervisor mode
|
||||||
|
*/
|
||||||
spr_register_hv(env, SPR_SDR1, "SDR1",
|
spr_register_hv(env, SPR_SDR1, "SDR1",
|
||||||
SPR_NOACCESS, SPR_NOACCESS,
|
SPR_NOACCESS, SPR_NOACCESS,
|
||||||
SPR_NOACCESS, SPR_NOACCESS,
|
SPR_NOACCESS, SPR_NOACCESS,
|
||||||
|
@ -1123,7 +1143,8 @@ static void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
|
||||||
TCGv t1 = tcg_temp_new();
|
TCGv t1 = tcg_temp_new();
|
||||||
TCGv t2 = tcg_temp_new();
|
TCGv t2 = tcg_temp_new();
|
||||||
|
|
||||||
/* Note, the HV=1 PR=0 case is handled earlier by simply using
|
/*
|
||||||
|
* Note, the HV=1 PR=0 case is handled earlier by simply using
|
||||||
* spr_write_generic for HV mode in the SPR table
|
* spr_write_generic for HV mode in the SPR table
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -1157,7 +1178,8 @@ static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
|
||||||
TCGv t1 = tcg_temp_new();
|
TCGv t1 = tcg_temp_new();
|
||||||
TCGv t2 = tcg_temp_new();
|
TCGv t2 = tcg_temp_new();
|
||||||
|
|
||||||
/* Note, the HV=1 case is handled earlier by simply using
|
/*
|
||||||
|
* Note, the HV=1 case is handled earlier by simply using
|
||||||
* spr_write_generic for HV mode in the SPR table
|
* spr_write_generic for HV mode in the SPR table
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -1187,7 +1209,8 @@ static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
|
||||||
TCGv t1 = tcg_temp_new();
|
TCGv t1 = tcg_temp_new();
|
||||||
TCGv t2 = tcg_temp_new();
|
TCGv t2 = tcg_temp_new();
|
||||||
|
|
||||||
/* Note, the HV=1 case is handled earlier by simply using
|
/*
|
||||||
|
* Note, the HV=1 case is handled earlier by simply using
|
||||||
* spr_write_generic for HV mode in the SPR table
|
* spr_write_generic for HV mode in the SPR table
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -1215,10 +1238,13 @@ static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
|
||||||
static void gen_spr_amr(CPUPPCState *env)
|
static void gen_spr_amr(CPUPPCState *env)
|
||||||
{
|
{
|
||||||
#ifndef CONFIG_USER_ONLY
|
#ifndef CONFIG_USER_ONLY
|
||||||
/* Virtual Page Class Key protection */
|
/*
|
||||||
/* The AMR is accessible either via SPR 13 or SPR 29. 13 is
|
* Virtual Page Class Key protection
|
||||||
|
*
|
||||||
|
* The AMR is accessible either via SPR 13 or SPR 29. 13 is
|
||||||
* userspace accessible, 29 is privileged. So we only need to set
|
* userspace accessible, 29 is privileged. So we only need to set
|
||||||
* the kvm ONE_REG id on one of them, we use 29 */
|
* the kvm ONE_REG id on one of them, we use 29
|
||||||
|
*/
|
||||||
spr_register(env, SPR_UAMR, "UAMR",
|
spr_register(env, SPR_UAMR, "UAMR",
|
||||||
&spr_read_generic, &spr_write_amr,
|
&spr_read_generic, &spr_write_amr,
|
||||||
&spr_read_generic, &spr_write_amr,
|
&spr_read_generic, &spr_write_amr,
|
||||||
|
@ -1902,7 +1928,8 @@ static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask,
|
||||||
/* TLB assist registers */
|
/* TLB assist registers */
|
||||||
/* XXX : not implemented */
|
/* XXX : not implemented */
|
||||||
for (i = 0; i < 8; i++) {
|
for (i = 0; i < 8; i++) {
|
||||||
void (*uea_write)(DisasContext *ctx, int sprn, int gprn) = &spr_write_generic32;
|
void (*uea_write)(DisasContext *ctx, int sprn, int gprn) =
|
||||||
|
&spr_write_generic32;
|
||||||
if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) {
|
if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) {
|
||||||
uea_write = &spr_write_generic;
|
uea_write = &spr_write_generic;
|
||||||
}
|
}
|
||||||
|
@ -2798,7 +2825,6 @@ static void gen_spr_8xx(CPUPPCState *env)
|
||||||
0x00000000);
|
0x00000000);
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX: TODO
|
|
||||||
/*
|
/*
|
||||||
* AMR => SPR 29 (Power 2.04)
|
* AMR => SPR 29 (Power 2.04)
|
||||||
* CTRL => SPR 136 (Power 2.04)
|
* CTRL => SPR 136 (Power 2.04)
|
||||||
|
@ -3344,16 +3370,18 @@ static int check_pow_nocheck(CPUPPCState *env)
|
||||||
|
|
||||||
static int check_pow_hid0(CPUPPCState *env)
|
static int check_pow_hid0(CPUPPCState *env)
|
||||||
{
|
{
|
||||||
if (env->spr[SPR_HID0] & 0x00E00000)
|
if (env->spr[SPR_HID0] & 0x00E00000) {
|
||||||
return 1;
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int check_pow_hid0_74xx(CPUPPCState *env)
|
static int check_pow_hid0_74xx(CPUPPCState *env)
|
||||||
{
|
{
|
||||||
if (env->spr[SPR_HID0] & 0x00600000)
|
if (env->spr[SPR_HID0] & 0x00600000) {
|
||||||
return 1;
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -4602,7 +4630,8 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data)
|
||||||
dc->desc = "e200 core";
|
dc->desc = "e200 core";
|
||||||
pcc->init_proc = init_proc_e200;
|
pcc->init_proc = init_proc_e200;
|
||||||
pcc->check_pow = check_pow_hid0;
|
pcc->check_pow = check_pow_hid0;
|
||||||
/* XXX: unimplemented instructions:
|
/*
|
||||||
|
* XXX: unimplemented instructions:
|
||||||
* dcblc
|
* dcblc
|
||||||
* dcbtlst
|
* dcbtlst
|
||||||
* dcbtstls
|
* dcbtstls
|
||||||
|
@ -4848,7 +4877,8 @@ static void init_proc_e500(CPUPPCState *env, int version)
|
||||||
tlbncfg[1] = 0x40028040;
|
tlbncfg[1] = 0x40028040;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
|
cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n",
|
||||||
|
env->spr[SPR_PVR]);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
/* Cache sizes */
|
/* Cache sizes */
|
||||||
|
@ -4872,7 +4902,8 @@ static void init_proc_e500(CPUPPCState *env, int version)
|
||||||
l1cfg1 |= 0x0B83820;
|
l1cfg1 |= 0x0B83820;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
|
cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n",
|
||||||
|
env->spr[SPR_PVR]);
|
||||||
}
|
}
|
||||||
gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg);
|
gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg);
|
||||||
/* XXX : not implemented */
|
/* XXX : not implemented */
|
||||||
|
@ -5252,7 +5283,8 @@ static void init_proc_601(CPUPPCState *env)
|
||||||
0x00000000);
|
0x00000000);
|
||||||
/* Memory management */
|
/* Memory management */
|
||||||
init_excp_601(env);
|
init_excp_601(env);
|
||||||
/* XXX: beware that dcache line size is 64
|
/*
|
||||||
|
* XXX: beware that dcache line size is 64
|
||||||
* but dcbz uses 32 bytes "sectors"
|
* but dcbz uses 32 bytes "sectors"
|
||||||
* XXX: this breaks clcs instruction !
|
* XXX: this breaks clcs instruction !
|
||||||
*/
|
*/
|
||||||
|
@ -5789,7 +5821,8 @@ static void init_proc_750(CPUPPCState *env)
|
||||||
0x00000000);
|
0x00000000);
|
||||||
/* Memory management */
|
/* Memory management */
|
||||||
gen_low_BATs(env);
|
gen_low_BATs(env);
|
||||||
/* XXX: high BATs are also present but are known to be bugged on
|
/*
|
||||||
|
* XXX: high BATs are also present but are known to be bugged on
|
||||||
* die version 1.x
|
* die version 1.x
|
||||||
*/
|
*/
|
||||||
init_excp_7x0(env);
|
init_excp_7x0(env);
|
||||||
|
@ -5971,7 +6004,8 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
|
||||||
dc->desc = "PowerPC 750 CL";
|
dc->desc = "PowerPC 750 CL";
|
||||||
pcc->init_proc = init_proc_750cl;
|
pcc->init_proc = init_proc_750cl;
|
||||||
pcc->check_pow = check_pow_hid0;
|
pcc->check_pow = check_pow_hid0;
|
||||||
/* XXX: not implemented:
|
/*
|
||||||
|
* XXX: not implemented:
|
||||||
* cache lock instructions:
|
* cache lock instructions:
|
||||||
* dcbz_l
|
* dcbz_l
|
||||||
* floating point paired instructions
|
* floating point paired instructions
|
||||||
|
@ -7569,8 +7603,10 @@ static void gen_spr_book3s_altivec(CPUPPCState *env)
|
||||||
&spr_read_generic, &spr_write_generic,
|
&spr_read_generic, &spr_write_generic,
|
||||||
KVM_REG_PPC_VRSAVE, 0x00000000);
|
KVM_REG_PPC_VRSAVE, 0x00000000);
|
||||||
|
|
||||||
/* Can't find information on what this should be on reset. This
|
/*
|
||||||
* value is the one used by 74xx processors. */
|
* Can't find information on what this should be on reset. This
|
||||||
|
* value is the one used by 74xx processors.
|
||||||
|
*/
|
||||||
vscr_init(env, 0x00010000);
|
vscr_init(env, 0x00010000);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8975,8 +9011,9 @@ static void init_ppc_proc(PowerPCCPU *cpu)
|
||||||
|
|
||||||
env->irq_inputs = NULL;
|
env->irq_inputs = NULL;
|
||||||
/* Set all exception vectors to an invalid address */
|
/* Set all exception vectors to an invalid address */
|
||||||
for (i = 0; i < POWERPC_EXCP_NB; i++)
|
for (i = 0; i < POWERPC_EXCP_NB; i++) {
|
||||||
env->excp_vectors[i] = (target_ulong)(-1ULL);
|
env->excp_vectors[i] = (target_ulong)(-1ULL);
|
||||||
|
}
|
||||||
env->ivor_mask = 0x00000000;
|
env->ivor_mask = 0x00000000;
|
||||||
env->ivpr_mask = 0x00000000;
|
env->ivpr_mask = 0x00000000;
|
||||||
/* Default MMU definitions */
|
/* Default MMU definitions */
|
||||||
|
@ -9108,8 +9145,9 @@ static void init_ppc_proc(PowerPCCPU *cpu)
|
||||||
#if !defined(CONFIG_USER_ONLY)
|
#if !defined(CONFIG_USER_ONLY)
|
||||||
if (env->nb_tlb != 0) {
|
if (env->nb_tlb != 0) {
|
||||||
int nb_tlb = env->nb_tlb;
|
int nb_tlb = env->nb_tlb;
|
||||||
if (env->id_tlbs != 0)
|
if (env->id_tlbs != 0) {
|
||||||
nb_tlb *= 2;
|
nb_tlb *= 2;
|
||||||
|
}
|
||||||
switch (env->tlb_type) {
|
switch (env->tlb_type) {
|
||||||
case TLB_6XX:
|
case TLB_6XX:
|
||||||
env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb);
|
env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb);
|
||||||
|
@ -9201,9 +9239,10 @@ static void fill_new_table(opc_handler_t **table, int len)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < len; i++)
|
for (i = 0; i < len; i++) {
|
||||||
table[i] = &invalid_handler;
|
table[i] = &invalid_handler;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int create_new_table(opc_handler_t **table, unsigned char idx)
|
static int create_new_table(opc_handler_t **table, unsigned char idx)
|
||||||
{
|
{
|
||||||
|
@ -9219,8 +9258,9 @@ static int create_new_table(opc_handler_t **table, unsigned char idx)
 static int insert_in_table(opc_handler_t **table, unsigned char idx,
                            opc_handler_t *handler)
 {
-    if (table[idx] != &invalid_handler)
+    if (table[idx] != &invalid_handler) {
         return -1;
+    }
     table[idx] = handler;

     return 0;
@ -9341,18 +9381,21 @@ static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
                 }
             } else {
                 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
-                                         insn->opc3, &insn->handler) < 0)
+                                         insn->opc3, &insn->handler) < 0) {
                     return -1;
+                }
             }
         } else {
             if (register_ind_insn(ppc_opcodes, insn->opc1,
-                                  insn->opc2, &insn->handler) < 0)
+                                  insn->opc2, &insn->handler) < 0) {
                 return -1;
+            }
         }
     } else {
-        if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0)
+        if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
             return -1;
+        }
     }

     return 0;
 }
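For context, register_insn() fills a multi-level dispatch table: a top-level array indexed by the major opcode (opc1), whose slots may hold indirect sub-tables indexed by opc2 and then opc3. A hedged sketch of the corresponding lookup, reusing the is_indirect_opcode()/ind_table() helpers visible in the next hunk and assuming the opc1()/opc2()/opc3() field extractors from target/ppc/translate.c; the real decode lives there, so this is illustrative rather than verbatim:

    /* Illustrative walk over the tables register_insn() populates. */
    static opc_handler_t *lookup_handler(opc_handler_t **table,
                                         uint32_t insn)
    {
        opc_handler_t *h = table[opc1(insn)];

        if (is_indirect_opcode(h)) {
            h = ind_table(h)[opc2(insn)];
            if (is_indirect_opcode(h)) {
                h = ind_table(h)[opc3(insn)];
            }
        }
        return h;
    }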
@ -9363,8 +9406,9 @@ static int test_opcode_table(opc_handler_t **table, int len)

     for (i = 0, count = 0; i < len; i++) {
         /* Consistency fixup */
-        if (table[i] == NULL)
+        if (table[i] == NULL) {
             table[i] = &invalid_handler;
+        }
         if (table[i] != &invalid_handler) {
             if (is_indirect_opcode(table[i])) {
                 tmp = test_opcode_table(ind_table(table[i]),
@ -9386,9 +9430,10 @@ static int test_opcode_table(opc_handler_t **table, int len)

 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
 {
-    if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0)
+    if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
         printf("*** WARNING: no opcode defined !\n");
+    }
 }

 /*****************************************************************************/
 static void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
@ -9726,14 +9771,15 @@ static int ppc_fixup_cpu(PowerPCCPU *cpu)
 {
     CPUPPCState *env = &cpu->env;

-    /* TCG doesn't (yet) emulate some groups of instructions that
-     * are implemented on some otherwise supported CPUs (e.g. VSX
-     * and decimal floating point instructions on POWER7). We
-     * remove unsupported instruction groups from the cpu state's
-     * instruction masks and hope the guest can cope. For at
-     * least the pseries machine, the unavailability of these
-     * instructions can be advertised to the guest via the device
-     * tree. */
+    /*
+     * TCG doesn't (yet) emulate some groups of instructions that are
+     * implemented on some otherwise supported CPUs (e.g. VSX and
+     * decimal floating point instructions on POWER7). We remove
+     * unsupported instruction groups from the cpu state's instruction
+     * masks and hope the guest can cope. For at least the pseries
+     * machine, the unavailability of these instructions can be
+     * advertised to the guest via the device tree.
+     */
     if ((env->insns_flags & ~PPC_TCG_INSNS)
         || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
         warn_report("Disabling some instructions which are not "
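The masking the comment describes happens just past the end of this hunk. A sketch of that step, assuming the PPC_TCG_INSNS/PPC_TCG_INSNS2 masks named in the condition above and not the verbatim function body:

    /* Drop the instruction groups TCG cannot emulate; the guest then
     * only sees what remains in the masks. */
    env->insns_flags &= PPC_TCG_INSNS;
    env->insns_flags2 &= PPC_TCG_INSNS2;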
@ -9928,31 +9974,37 @@ static void ppc_cpu_realize(DeviceState *dev, Error **errp)
            " Bus model : %s\n",
            excp_model, bus_model);
     printf(" MSR features :\n");
-    if (env->flags & POWERPC_FLAG_SPE)
+    if (env->flags & POWERPC_FLAG_SPE) {
         printf(" signal processing engine enable"
                "\n");
-    else if (env->flags & POWERPC_FLAG_VRE)
+    } else if (env->flags & POWERPC_FLAG_VRE) {
         printf(" vector processor enable\n");
-    if (env->flags & POWERPC_FLAG_TGPR)
+    }
+    if (env->flags & POWERPC_FLAG_TGPR) {
         printf(" temporary GPRs\n");
-    else if (env->flags & POWERPC_FLAG_CE)
+    } else if (env->flags & POWERPC_FLAG_CE) {
         printf(" critical input enable\n");
-    if (env->flags & POWERPC_FLAG_SE)
+    }
+    if (env->flags & POWERPC_FLAG_SE) {
         printf(" single-step trace mode\n");
-    else if (env->flags & POWERPC_FLAG_DWE)
+    } else if (env->flags & POWERPC_FLAG_DWE) {
         printf(" debug wait enable\n");
-    else if (env->flags & POWERPC_FLAG_UBLE)
+    } else if (env->flags & POWERPC_FLAG_UBLE) {
         printf(" user BTB lock enable\n");
-    if (env->flags & POWERPC_FLAG_BE)
+    }
+    if (env->flags & POWERPC_FLAG_BE) {
         printf(" branch-step trace mode\n");
-    else if (env->flags & POWERPC_FLAG_DE)
+    } else if (env->flags & POWERPC_FLAG_DE) {
         printf(" debug interrupt enable\n");
-    if (env->flags & POWERPC_FLAG_PX)
+    }
+    if (env->flags & POWERPC_FLAG_PX) {
         printf(" inclusive protection\n");
-    else if (env->flags & POWERPC_FLAG_PMM)
+    } else if (env->flags & POWERPC_FLAG_PMM) {
         printf(" performance monitor mark\n");
-    if (env->flags == POWERPC_FLAG_NONE)
+    }
+    if (env->flags == POWERPC_FLAG_NONE) {
         printf(" none\n");
+    }
     printf(" Time-base/decrementer clock source: %s\n",
            env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock");
     dump_ppc_insns(env);
@ -10094,8 +10146,9 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name)
     const char *p;
     unsigned long pvr;

-    /* Lookup by PVR if cpu_model is valid 8 digit hex number
-     * (excl: 0x prefix if present)
+    /*
+     * Lookup by PVR if cpu_model is valid 8 digit hex number (excl:
+     * 0x prefix if present)
      */
     if (!qemu_strtoul(name, &p, 16, &pvr)) {
         int len = p - name;
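A hedged sketch of the validation the comment describes: the name must parse as hex and leave exactly eight digits once any 0x prefix is ignored, in which case the class is resolved by PVR. ppc_cpu_class_by_pvr() is the PVR-based lookup in this file; the prefix and length handling below is an assumption for illustration, not the verbatim body:

    if (!qemu_strtoul(name, &p, 16, &pvr)) {
        int len = p - name;

        /* hypothetical prefix handling: "0x12345678" -> 8 digits */
        if (len == 10 && strncmp(name, "0x", 2) == 0) {
            len -= 2;
        }
        if (len == 8 && *p == '\0') {
            return OBJECT_CLASS(ppc_cpu_class_by_pvr(pvr));
        }
    }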
@ -10439,14 +10492,14 @@ static void ppc_cpu_instance_init(Object *obj)
     env->bfd_mach = pcc->bfd_mach;
     env->check_pow = pcc->check_pow;

-    /* Mark HV mode as supported if the CPU has an MSR_HV bit
-     * in the msr_mask. The mask can later be cleared by PAPR
-     * mode but the hv mode support will remain, thus enforcing
-     * that we cannot use priv. instructions in guest in PAPR
-     * mode. For 970 we currently simply don't set HV in msr_mask
-     * thus simulating an "Apple mode" 970. If we ever want to
-     * support 970 HV mode, we'll have to add a processor attribute
-     * of some sort.
+    /*
+     * Mark HV mode as supported if the CPU has an MSR_HV bit in the
+     * msr_mask. The mask can later be cleared by PAPR mode but the hv
+     * mode support will remain, thus enforcing that we cannot use
+     * priv. instructions in guest in PAPR mode. For 970 we currently
+     * simply don't set HV in msr_mask thus simulating an "Apple mode"
+     * 970. If we ever want to support 970 HV mode, we'll have to add
+     * a processor attribute of some sort.
      */
 #if !defined(CONFIG_USER_ONLY)
     env->has_hv_mode = !!(env->msr_mask & MSR_HVB);