spapr_vio/spapr_iommu: Move VIO bypass where it belongs

Instead of tweaking a TCE table device by adding a bypass flag to it,
let's add an alias to RAM and an IOMMU memory region, and enable/disable
those according to the selected bypass mode.
This way the IOMMU memory region can have the size of the actual window
rather than ram_size, which is essential for the upcoming DDW support.

This moves bypass logic to VIO layer and keeps @bypass flag in TCE table
for migration compatibility only. This replaces spapr_tce_set_bypass()
calls with explicit assignments to avoid confusion, as the function could
do something more than just syncing the @bypass flag.

This adds a pointer to VIO device into the sPAPRTCETable struct to provide
the sPAPRTCETable device a way to update bypass mode for the VIO device.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexander Graf <agraf@suse.de>
This commit is contained in:
Alexey Kardashevskiy 2015-01-29 16:04:58 +11:00 committed by Alexander Graf
parent 0048fa6c80
commit ee9a569ab8
4 changed files with 47 additions and 13 deletions

View File

@ -25,6 +25,7 @@
#include "trace.h" #include "trace.h"
#include "hw/ppc/spapr.h" #include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include <libfdt.h> #include <libfdt.h>
@ -73,9 +74,7 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
.perm = IOMMU_NONE, .perm = IOMMU_NONE,
}; };
if (tcet->bypass) { if ((addr >> tcet->page_shift) < tcet->nb_table) {
ret.perm = IOMMU_RW;
} else if ((addr >> tcet->page_shift) < tcet->nb_table) {
/* Check if we are in bound */ /* Check if we are in bound */
hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift); hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
@ -91,10 +90,22 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
return ret; return ret;
} }
/*
 * Post-load hook for the TCE table VMState: re-applies the migrated
 * @bypass flag to the owning VIO device (if any), so the bypass/IOMMU
 * memory regions are re-enabled/disabled to match the incoming state.
 * The @bypass field itself is kept only for migration compatibility.
 */
static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    /* vdev is NULL for TCE tables not owned by a VIO device (e.g. PCI) */
    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    return 0;
}
static const VMStateDescription vmstate_spapr_tce_table = { static const VMStateDescription vmstate_spapr_tce_table = {
.name = "spapr_iommu", .name = "spapr_iommu",
.version_id = 2, .version_id = 2,
.minimum_version_id = 2, .minimum_version_id = 2,
.post_load = spapr_tce_table_post_load,
.fields = (VMStateField []) { .fields = (VMStateField []) {
/* Sanity check */ /* Sanity check */
VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable), VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
@ -132,7 +143,8 @@ static int spapr_tce_table_realize(DeviceState *dev)
trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd); trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);
memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops, memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
"iommu-spapr", ram_size); "iommu-spapr",
(uint64_t)tcet->nb_table << tcet->page_shift);
QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list); QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);
@ -192,17 +204,11 @@ MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
return &tcet->iommu; return &tcet->iommu;
} }
void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass)
{
tcet->bypass = bypass;
}
static void spapr_tce_reset(DeviceState *dev) static void spapr_tce_reset(DeviceState *dev)
{ {
sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev); sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
size_t table_size = tcet->nb_table * sizeof(uint64_t); size_t table_size = tcet->nb_table * sizeof(uint64_t);
tcet->bypass = false;
memset(tcet->table, 0, table_size); memset(tcet->table, 0, table_size);
} }

View File

@ -322,6 +322,18 @@ static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev)
free_crq(dev); free_crq(dev);
} }
/*
 * Switch a VIO device between DMA bypass mode and translated (TCE) mode.
 *
 * In bypass mode the RAM alias (mrbypass) is enabled and the IOMMU
 * region is disabled, so DMA goes straight to system memory; otherwise
 * the IOMMU region handles translation. The tcet->bypass flag is also
 * kept in sync, but only for migration compatibility.
 */
void spapr_vio_set_bypass(VIOsPAPRDevice *dev, bool bypass)
{
    /* Devices without an RTCE window have no TCE table; nothing to switch */
    if (!dev->tcet) {
        return;
    }

    memory_region_set_enabled(&dev->mrbypass, bypass);
    memory_region_set_enabled(spapr_tce_get_iommu(dev->tcet), !bypass);

    dev->tcet->bypass = bypass;
}
static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPREnvironment *spapr, static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t token, uint32_t token,
uint32_t nargs, target_ulong args, uint32_t nargs, target_ulong args,
@ -348,7 +360,7 @@ static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPREnvironment *spapr,
return; return;
} }
spapr_tce_set_bypass(dev->tcet, !!enable); spapr_vio_set_bypass(dev, !!enable);
rtas_st(rets, 0, RTAS_OUT_SUCCESS); rtas_st(rets, 0, RTAS_OUT_SUCCESS);
} }
@ -407,6 +419,7 @@ static void spapr_vio_busdev_reset(DeviceState *qdev)
dev->signal_state = 0; dev->signal_state = 0;
spapr_vio_set_bypass(dev, false);
if (pc->reset) { if (pc->reset) {
pc->reset(dev); pc->reset(dev);
} }
@ -456,12 +469,23 @@ static int spapr_vio_busdev_init(DeviceState *qdev)
if (pc->rtce_window_size) { if (pc->rtce_window_size) {
uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg; uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
ram_size);
memory_region_init_alias(&dev->mrbypass, OBJECT(dev),
"iommu-spapr-bypass", get_system_memory(),
0, ram_size);
memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1);
address_space_init(&dev->as, &dev->mrroot, qdev->id);
dev->tcet = spapr_tce_new_table(qdev, liobn, dev->tcet = spapr_tce_new_table(qdev, liobn,
0, 0,
SPAPR_TCE_PAGE_SHIFT, SPAPR_TCE_PAGE_SHIFT,
pc->rtce_window_size >> pc->rtce_window_size >>
SPAPR_TCE_PAGE_SHIFT, false); SPAPR_TCE_PAGE_SHIFT, false);
address_space_init(&dev->as, spapr_tce_get_iommu(dev->tcet), qdev->id); dev->tcet->vdev = dev;
memory_region_add_subregion_overlap(&dev->mrroot, 0,
spapr_tce_get_iommu(dev->tcet), 2);
} }
return pc->init(dev); return pc->init(dev);

View File

@ -463,6 +463,7 @@ struct sPAPRTCETable {
bool vfio_accel; bool vfio_accel;
int fd; int fd;
MemoryRegion iommu; MemoryRegion iommu;
struct VIOsPAPRDevice *vdev; /* for @bypass migration compatibility only */
QLIST_ENTRY(sPAPRTCETable) list; QLIST_ENTRY(sPAPRTCETable) list;
}; };
@ -475,7 +476,6 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
uint32_t nb_table, uint32_t nb_table,
bool vfio_accel); bool vfio_accel);
MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet); MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet);
void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass);
int spapr_dma_dt(void *fdt, int node_off, const char *propname, int spapr_dma_dt(void *fdt, int node_off, const char *propname,
uint32_t liobn, uint64_t window, uint32_t size); uint32_t liobn, uint64_t window, uint32_t size);
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname, int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,

View File

@ -64,6 +64,8 @@ struct VIOsPAPRDevice {
target_ulong signal_state; target_ulong signal_state;
VIOsPAPR_CRQ crq; VIOsPAPR_CRQ crq;
AddressSpace as; AddressSpace as;
MemoryRegion mrroot;
MemoryRegion mrbypass;
sPAPRTCETable *tcet; sPAPRTCETable *tcet;
}; };
@ -139,4 +141,6 @@ extern const VMStateDescription vmstate_spapr_vio;
#define VMSTATE_SPAPR_VIO(_f, _s) \ #define VMSTATE_SPAPR_VIO(_f, _s) \
VMSTATE_STRUCT(_f, _s, 0, vmstate_spapr_vio, VIOsPAPRDevice) VMSTATE_STRUCT(_f, _s, 0, vmstate_spapr_vio, VIOsPAPRDevice)
void spapr_vio_set_bypass(VIOsPAPRDevice *dev, bool bypass);
#endif /* _HW_SPAPR_VIO_H */ #endif /* _HW_SPAPR_VIO_H */