mirror of https://github.com/xemu-project/xemu.git
virtio,pc,pci: features, cleanups, fixes
make TCO watchdog work by default
part of generic vdpa support
asid interrupt for vhost-vdpa
added flex bus port DVSEC for cxl

misc fixes, cleanups, documentation

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmOi/OQPHG1zdEByZWRo
YXQuY29tAAoJECgfDbjSjVRpGCkH/j06y7PEDHfG1MnPoFQIEWKHPyU/FMUe1RCW
dRsfVmHZ8Jc1Jy4wVch461QpcIC+WL/Fshzh92G0hVDI2AWzJOxzpWQESmCphJJG
Olk/H/ort4ZIrwOynAHDKLzgltoTI91uao3UT7w67NumAgVYYW4Q9ObHm2G3Wmwc
fe763NmlObrNYYCIbJw/KiBLrk7M5LaMLPeoRGJefD4MYUAPXy/sUQt61VyuZpuG
xFAeDB7/76MXFKJVjccSnZfa8lihOJ5AlvCBTjjY5PbGl8+U1usdd3hOVComYb02
LW4sKLkxe5sycg/bFQdBLpz2lZVlMjpY9nd9YiumIrgLBv70Uf0=
=WyvK
-----END PGP SIGNATURE-----

Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: features, cleanups, fixes

make TCO watchdog work by default
part of generic vdpa support
asid interrupt for vhost-vdpa
added flex bus port DVSEC for cxl

misc fixes, cleanups, documentation

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Wed 21 Dec 2022 12:32:36 GMT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (41 commits)
  contrib/vhost-user-blk: Replace lseek64 with lseek
  libvhost-user: Switch to unsigned int for inuse field in struct VuVirtq
  hw/virtio: Extract QMP related code virtio-qmp.c
  hw/virtio: Extract config read/write accessors to virtio-config-io.c
  hw/virtio: Constify qmp_virtio_feature_map_t[]
  hw/virtio: Guard and restrict scope of qmp_virtio_feature_map_t[]
  hw/virtio: Rename virtio_ss[] -> specific_virtio_ss[]
  hw/virtio: Add missing "hw/core/cpu.h" include
  hw/cxl/device: Add Flex Bus Port DVSEC
  hw/acpi: Rename tco.c -> ich9_tco.c
  acpi/tests/avocado/bits: add mformat as one of the dependencies
  docs/acpi/bits: document BITS_DEBUG environment variable
  pci: drop redundant PCIDeviceClass::is_bridge field
  remove DEC 21154 PCI bridge
  vhost: fix vq dirty bitmap syncing when vIOMMU is enabled
  acpi/tests/avocado/bits: add SPDX license identifiers for bios bits tests
  include/hw: attempt to document VirtIO feature variables
  vhost-user: send set log base message only once
  vdpa: always start CVQ in SVQ mode if possible
  vdpa: add shadow_data to vhost_vdpa
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 113f00e387
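Several hunks below drop the PCIDeviceClass::is_bridge flag in favour of the IS_PCI_BRIDGE() check (see "pci: drop redundant PCIDeviceClass::is_bridge field" in the shortlog above). A minimal sketch of the pattern, assuming IS_PCI_BRIDGE() boils down to a QOM cast check against TYPE_PCI_BRIDGE; the macro's real definition is not part of this diff and the helper name is illustrative only:

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"

/* Sketch only: not code from the tree. */
static bool device_is_pci_bridge(PCIDevice *dev)
{
    /* before: PCI_DEVICE_GET_CLASS(dev)->is_bridge, a per-class flag      */
    /* after:  ask QOM whether the instance derives from TYPE_PCI_BRIDGE   */
    return object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE) != NULL;
}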
@@ -1659,8 +1659,8 @@ F: hw/isa/piix3.c
 F: hw/isa/lpc_ich9.c
 F: hw/i2c/smbus_ich9.c
 F: hw/acpi/piix4.c
-F: hw/acpi/ich9.c
-F: include/hw/acpi/ich9.h
+F: hw/acpi/ich9*.c
+F: include/hw/acpi/ich9*.h
 F: include/hw/southbridge/piix.h
 F: hw/misc/sga.c
 F: hw/isa/apm.c
@@ -532,9 +532,9 @@ vub_get_blocksize(int fd)
 static void
 vub_initialize_config(int fd, struct virtio_blk_config *config)
 {
-    off64_t capacity;
+    off_t capacity;
 
-    capacity = lseek64(fd, 0, SEEK_END);
+    capacity = lseek(fd, 0, SEEK_END);
     config->capacity = capacity >> 9;
     config->blk_size = vub_get_blocksize(fd);
     config->size_max = 65536;
@@ -52,6 +52,9 @@ Under ``tests/avocado/`` as the root we have:
 for their tests. In order to enable debugging, you can set **V=1**
 environment variable. This enables verbose mode for the test and also dumps
 the entire log from bios bits and more information in case failure happens.
+You can also set **BITS_DEBUG=1** to turn on debug mode. It will enable
+verbose logs and also retain the temporary work directory the test used for
+you to inspect and run the specific commands manually.
 
 In order to run this test, please perform the following steps from the QEMU
 build directory:
@@ -34,7 +34,7 @@
 #include "sysemu/reset.h"
 #include "sysemu/runstate.h"
 #include "hw/acpi/acpi.h"
-#include "hw/acpi/tco.h"
+#include "hw/acpi/ich9_tco.h"
 
 #include "hw/i386/ich9.h"
 #include "hw/mem/pc-dimm.h"
@@ -316,8 +316,9 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
 
     pm->smm_enabled = smm_enabled;
 
-    pm->enable_tco = true;
-    acpi_pm_tco_init(&pm->tco_regs, &pm->io);
+    if (pm->enable_tco) {
+        acpi_pm_tco_init(&pm->tco_regs, &pm->io);
+    }
 
     if (pm->use_acpi_hotplug_bridge) {
         acpi_pcihp_init(OBJECT(lpc_pci),
@@ -440,6 +441,7 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm)
     pm->s4_val = 2;
     pm->use_acpi_hotplug_bridge = true;
     pm->keep_pci_slot_hpc = true;
+    pm->enable_tco = true;
 
     object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE,
                                    &pm->pm_io_base, OBJ_PROP_FLAG_READ);
@@ -12,7 +12,7 @@
 #include "hw/i386/ich9.h"
 #include "migration/vmstate.h"
 
-#include "hw/acpi/tco.h"
+#include "hw/acpi/ich9_tco.h"
 #include "trace.h"
 
 enum {
@@ -86,6 +86,7 @@ static inline int can_start_tco_timer(TCOIORegs *tr)
 static uint32_t tco_ioport_readw(TCOIORegs *tr, uint32_t addr)
 {
     uint16_t rld;
+    uint32_t ret = 0;
 
     switch (addr) {
     case TCO_RLD:
@@ -96,35 +97,49 @@ static uint32_t tco_ioport_readw(TCOIORegs *tr, uint32_t addr)
         } else {
             rld = tr->tco.rld;
         }
-        return rld;
+        ret = rld;
+        break;
     case TCO_DAT_IN:
-        return tr->tco.din;
+        ret = tr->tco.din;
+        break;
     case TCO_DAT_OUT:
-        return tr->tco.dout;
+        ret = tr->tco.dout;
+        break;
     case TCO1_STS:
-        return tr->tco.sts1;
+        ret = tr->tco.sts1;
+        break;
     case TCO2_STS:
-        return tr->tco.sts2;
+        ret = tr->tco.sts2;
+        break;
     case TCO1_CNT:
-        return tr->tco.cnt1;
+        ret = tr->tco.cnt1;
+        break;
     case TCO2_CNT:
-        return tr->tco.cnt2;
+        ret = tr->tco.cnt2;
+        break;
     case TCO_MESSAGE1:
-        return tr->tco.msg1;
+        ret = tr->tco.msg1;
+        break;
     case TCO_MESSAGE2:
-        return tr->tco.msg2;
+        ret = tr->tco.msg2;
+        break;
     case TCO_WDCNT:
-        return tr->tco.wdcnt;
+        ret = tr->tco.wdcnt;
+        break;
     case TCO_TMR:
-        return tr->tco.tmr;
+        ret = tr->tco.tmr;
+        break;
     case SW_IRQ_GEN:
-        return tr->sw_irq_gen;
+        ret = tr->sw_irq_gen;
+        break;
     }
-    return 0;
+    trace_tco_io_read(addr, ret);
+    return ret;
 }
 
 static void tco_ioport_writew(TCOIORegs *tr, uint32_t addr, uint32_t val)
 {
+    trace_tco_io_write(addr, val);
     switch (addr) {
     case TCO_RLD:
         tr->timeouts_no = 0;
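The tco_ioport_readw() rework above funnels every case through a single exit so the new trace_tco_io_read() call can log each access, while tco_ioport_writew() gains a matching trace at its entry. A self-contained sketch of that single-exit pattern, using illustrative register names rather than the actual TCO code:

#include <inttypes.h>
#include <stdio.h>

static uint32_t reg_a = 0x12, reg_b = 0x34;

/* stand-in for the generated trace_tco_io_read() helper */
static void trace_reg_read(uint32_t addr, uint32_t val)
{
    printf("reg_read addr=0x%" PRIx32 " val=0x%" PRIx32 "\n", addr, val);
}

static uint32_t reg_read(uint32_t addr)
{
    uint32_t ret = 0;

    switch (addr) {
    case 0x0:
        ret = reg_a;            /* was: return reg_a; */
        break;
    case 0x4:
        ret = reg_b;            /* was: return reg_b; */
        break;
    }
    trace_reg_read(addr, ret);  /* single exit point sees every read */
    return ret;
}

int main(void)
{
    reg_read(0x0);
    reg_read(0x4);
    return 0;
}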
@@ -22,7 +22,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))
-acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c'))
+acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'ich9_tco.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_ERST', if_true: files('erst.c'))
 acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
 acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))
@@ -186,7 +186,6 @@ static PCIBus *acpi_pcihp_find_hotplug_bus(AcpiPciHpState *s, int bsel)
 
 static bool acpi_pcihp_pc_no_hotplug(AcpiPciHpState *s, PCIDevice *dev)
 {
-    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
     DeviceClass *dc = DEVICE_GET_CLASS(dev);
     /*
      * ACPI doesn't allow hotplug of bridge devices. Don't allow
@@ -196,7 +195,7 @@ static bool acpi_pcihp_pc_no_hotplug(AcpiPciHpState *s, PCIDevice *dev)
      * Don't allow hot-unplug of SR-IOV Virtual Functions, as they
      * will be removed implicitly, when Physical Function is unplugged.
      */
-    return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable ||
+    return (IS_PCI_BRIDGE(dev) && !dev->qdev.hotplugged) || !dc->hotpluggable ||
            pci_is_vf(dev);
 }
 
@@ -55,6 +55,8 @@ piix4_gpe_writeb(uint64_t addr, unsigned width, uint64_t val) "addr: 0x%" PRIx64
 # tco.c
 tco_timer_reload(int ticks, int msec) "ticks=%d (%d ms)"
 tco_timer_expired(int timeouts_no, bool strap, bool no_reboot) "timeouts_no=%d no_reboot=%d/%d"
+tco_io_write(uint64_t addr, uint32_t val) "addr=0x%" PRIx64 " val=0x%" PRIx32
+tco_io_read(uint64_t addr, uint32_t val) "addr=0x%" PRIx64 " val=0x%" PRIx32
 
 # erst.c
 acpi_erst_reg_write(uint64_t addr, uint64_t val, unsigned size) "addr: 0x%04" PRIx64 " <== 0x%016" PRIx64 " (size: %u)"
@ -3218,10 +3218,17 @@ static void machvirt_machine_init(void)
|
|||
}
|
||||
type_init(machvirt_machine_init);
|
||||
|
||||
static void virt_machine_7_2_options(MachineClass *mc)
|
||||
static void virt_machine_8_0_options(MachineClass *mc)
|
||||
{
|
||||
}
|
||||
DEFINE_VIRT_MACHINE_AS_LATEST(7, 2)
|
||||
DEFINE_VIRT_MACHINE_AS_LATEST(8, 0)
|
||||
|
||||
static void virt_machine_7_2_options(MachineClass *mc)
|
||||
{
|
||||
virt_machine_8_0_options(mc);
|
||||
compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
|
||||
}
|
||||
DEFINE_VIRT_MACHINE(7, 2)
|
||||
|
||||
static void virt_machine_7_1_options(MachineClass *mc)
|
||||
{
|
||||
|
|
|
@ -40,6 +40,9 @@
|
|||
#include "hw/virtio/virtio-pci.h"
|
||||
#include "qom/object_interfaces.h"
|
||||
|
||||
GlobalProperty hw_compat_7_2[] = {};
|
||||
const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2);
|
||||
|
||||
GlobalProperty hw_compat_7_1[] = {
|
||||
{ "virtio-device", "queue_reset", "false" },
|
||||
};
|
||||
|
|
|
@ -403,7 +403,6 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
|
|||
|
||||
for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
|
||||
DeviceClass *dc;
|
||||
PCIDeviceClass *pc;
|
||||
PCIDevice *pdev = bus->devices[devfn];
|
||||
int slot = PCI_SLOT(devfn);
|
||||
int func = PCI_FUNC(devfn);
|
||||
|
@ -414,14 +413,14 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
|
|||
bool cold_plugged_bridge = false;
|
||||
|
||||
if (pdev) {
|
||||
pc = PCI_DEVICE_GET_CLASS(pdev);
|
||||
dc = DEVICE_GET_CLASS(pdev);
|
||||
|
||||
/*
|
||||
* Cold plugged bridges aren't themselves hot-pluggable.
|
||||
* Hotplugged bridges *are* hot-pluggable.
|
||||
*/
|
||||
cold_plugged_bridge = pc->is_bridge && !DEVICE(pdev)->hotplugged;
|
||||
cold_plugged_bridge = IS_PCI_BRIDGE(pdev) &&
|
||||
!DEVICE(pdev)->hotplugged;
|
||||
bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en;
|
||||
|
||||
hotpluggbale_slot = bsel && dc->hotpluggable &&
|
||||
|
|
|
@ -107,6 +107,11 @@
|
|||
{ "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
|
||||
{ "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },
|
||||
|
||||
GlobalProperty pc_compat_7_2[] = {
|
||||
{ "ICH9-LPC", "noreboot", "true" },
|
||||
};
|
||||
const size_t pc_compat_7_2_len = G_N_ELEMENTS(pc_compat_7_2);
|
||||
|
||||
GlobalProperty pc_compat_7_1[] = {};
|
||||
const size_t pc_compat_7_1_len = G_N_ELEMENTS(pc_compat_7_1);
|
||||
|
||||
|
|
|
@ -426,6 +426,7 @@ static void pc_i440fx_machine_options(MachineClass *m)
|
|||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pcmc->default_nic_model = "e1000";
|
||||
pcmc->pci_root_uid = 0;
|
||||
pcmc->default_cpu_version = 1;
|
||||
|
||||
m->family = "pc_piix";
|
||||
m->desc = "Standard PC (i440FX + PIIX, 1996)";
|
||||
|
@ -435,13 +436,23 @@ static void pc_i440fx_machine_options(MachineClass *m)
|
|||
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
|
||||
}
|
||||
|
||||
static void pc_i440fx_7_2_machine_options(MachineClass *m)
|
||||
static void pc_i440fx_8_0_machine_options(MachineClass *m)
|
||||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_i440fx_machine_options(m);
|
||||
m->alias = "pc";
|
||||
m->is_default = true;
|
||||
pcmc->default_cpu_version = 1;
|
||||
}
|
||||
|
||||
DEFINE_I440FX_MACHINE(v8_0, "pc-i440fx-8.0", NULL,
|
||||
pc_i440fx_8_0_machine_options);
|
||||
|
||||
static void pc_i440fx_7_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_8_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_7_2, hw_compat_7_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_7_2, pc_compat_7_2_len);
|
||||
}
|
||||
|
||||
DEFINE_I440FX_MACHINE(v7_2, "pc-i440fx-7.2", NULL,
|
||||
|
@ -451,8 +462,6 @@ static void pc_i440fx_7_1_machine_options(MachineClass *m)
|
|||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_i440fx_7_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
pcmc->legacy_no_rng_seed = true;
|
||||
compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len);
|
||||
|
@ -465,8 +474,6 @@ static void pc_i440fx_7_0_machine_options(MachineClass *m)
|
|||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_i440fx_7_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
pcmc->enforce_amd_1tb_hole = false;
|
||||
compat_props_add(m->compat_props, hw_compat_7_0, hw_compat_7_0_len);
|
||||
compat_props_add(m->compat_props, pc_compat_7_0, pc_compat_7_0_len);
|
||||
|
@ -478,8 +485,6 @@ DEFINE_I440FX_MACHINE(v7_0, "pc-i440fx-7.0", NULL,
|
|||
static void pc_i440fx_6_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_7_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len);
|
||||
}
|
||||
|
@ -490,8 +495,6 @@ DEFINE_I440FX_MACHINE(v6_2, "pc-i440fx-6.2", NULL,
|
|||
static void pc_i440fx_6_1_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_6_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len);
|
||||
m->smp_props.prefer_sockets = true;
|
||||
|
@ -503,8 +506,6 @@ DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL,
|
|||
static void pc_i440fx_6_0_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_6_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
|
||||
compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
|
||||
}
|
||||
|
@ -515,8 +516,6 @@ DEFINE_I440FX_MACHINE(v6_0, "pc-i440fx-6.0", NULL,
|
|||
static void pc_i440fx_5_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_6_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_5_2, hw_compat_5_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_5_2, pc_compat_5_2_len);
|
||||
}
|
||||
|
@ -529,8 +528,6 @@ static void pc_i440fx_5_1_machine_options(MachineClass *m)
|
|||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
|
||||
pc_i440fx_5_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_5_1, hw_compat_5_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_5_1, pc_compat_5_1_len);
|
||||
pcmc->kvmclock_create_always = false;
|
||||
|
@ -543,8 +540,6 @@ DEFINE_I440FX_MACHINE(v5_1, "pc-i440fx-5.1", NULL,
|
|||
static void pc_i440fx_5_0_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_5_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
m->numa_mem_supported = true;
|
||||
compat_props_add(m->compat_props, hw_compat_5_0, hw_compat_5_0_len);
|
||||
compat_props_add(m->compat_props, pc_compat_5_0, pc_compat_5_0_len);
|
||||
|
@ -557,8 +552,6 @@ DEFINE_I440FX_MACHINE(v5_0, "pc-i440fx-5.0", NULL,
|
|||
static void pc_i440fx_4_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_5_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_4_2, hw_compat_4_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_4_2, pc_compat_4_2_len);
|
||||
}
|
||||
|
@ -569,8 +562,6 @@ DEFINE_I440FX_MACHINE(v4_2, "pc-i440fx-4.2", NULL,
|
|||
static void pc_i440fx_4_1_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_i440fx_4_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
compat_props_add(m->compat_props, hw_compat_4_1, hw_compat_4_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_4_1, pc_compat_4_1_len);
|
||||
}
|
||||
|
@ -582,8 +573,6 @@ static void pc_i440fx_4_0_machine_options(MachineClass *m)
|
|||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_i440fx_4_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->is_default = false;
|
||||
pcmc->default_cpu_version = CPU_VERSION_LEGACY;
|
||||
compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len);
|
||||
compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len);
|
||||
|
@ -597,9 +586,7 @@ static void pc_i440fx_3_1_machine_options(MachineClass *m)
|
|||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
|
||||
pc_i440fx_4_0_machine_options(m);
|
||||
m->is_default = false;
|
||||
m->smbus_no_migration_support = true;
|
||||
m->alias = NULL;
|
||||
pcmc->pvh_enabled = false;
|
||||
compat_props_add(m->compat_props, hw_compat_3_1, hw_compat_3_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_3_1, pc_compat_3_1_len);
|
||||
|
|
|
@ -355,6 +355,7 @@ static void pc_q35_machine_options(MachineClass *m)
|
|||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pcmc->default_nic_model = "e1000e";
|
||||
pcmc->pci_root_uid = 0;
|
||||
pcmc->default_cpu_version = 1;
|
||||
|
||||
m->family = "pc_q35";
|
||||
m->desc = "Standard PC (Q35 + ICH9, 2009)";
|
||||
|
@ -370,12 +371,21 @@ static void pc_q35_machine_options(MachineClass *m)
|
|||
m->max_cpus = 288;
|
||||
}
|
||||
|
||||
static void pc_q35_7_2_machine_options(MachineClass *m)
|
||||
static void pc_q35_8_0_machine_options(MachineClass *m)
|
||||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_q35_machine_options(m);
|
||||
m->alias = "q35";
|
||||
pcmc->default_cpu_version = 1;
|
||||
}
|
||||
|
||||
DEFINE_Q35_MACHINE(v8_0, "pc-q35-8.0", NULL,
|
||||
pc_q35_8_0_machine_options);
|
||||
|
||||
static void pc_q35_7_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_8_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_7_2, hw_compat_7_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_7_2, pc_compat_7_2_len);
|
||||
}
|
||||
|
||||
DEFINE_Q35_MACHINE(v7_2, "pc-q35-7.2", NULL,
|
||||
|
@ -385,7 +395,6 @@ static void pc_q35_7_1_machine_options(MachineClass *m)
|
|||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_q35_7_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
pcmc->legacy_no_rng_seed = true;
|
||||
compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len);
|
||||
|
@ -398,7 +407,6 @@ static void pc_q35_7_0_machine_options(MachineClass *m)
|
|||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_q35_7_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
pcmc->enforce_amd_1tb_hole = false;
|
||||
compat_props_add(m->compat_props, hw_compat_7_0, hw_compat_7_0_len);
|
||||
compat_props_add(m->compat_props, pc_compat_7_0, pc_compat_7_0_len);
|
||||
|
@ -410,7 +418,6 @@ DEFINE_Q35_MACHINE(v7_0, "pc-q35-7.0", NULL,
|
|||
static void pc_q35_6_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_7_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len);
|
||||
}
|
||||
|
@ -421,7 +428,6 @@ DEFINE_Q35_MACHINE(v6_2, "pc-q35-6.2", NULL,
|
|||
static void pc_q35_6_1_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_6_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len);
|
||||
m->smp_props.prefer_sockets = true;
|
||||
|
@ -433,7 +439,6 @@ DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL,
|
|||
static void pc_q35_6_0_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_6_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len);
|
||||
compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len);
|
||||
}
|
||||
|
@ -444,7 +449,6 @@ DEFINE_Q35_MACHINE(v6_0, "pc-q35-6.0", NULL,
|
|||
static void pc_q35_5_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_6_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_5_2, hw_compat_5_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_5_2, pc_compat_5_2_len);
|
||||
}
|
||||
|
@ -457,7 +461,6 @@ static void pc_q35_5_1_machine_options(MachineClass *m)
|
|||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
|
||||
pc_q35_5_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_5_1, hw_compat_5_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_5_1, pc_compat_5_1_len);
|
||||
pcmc->kvmclock_create_always = false;
|
||||
|
@ -470,7 +473,6 @@ DEFINE_Q35_MACHINE(v5_1, "pc-q35-5.1", NULL,
|
|||
static void pc_q35_5_0_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_5_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
m->numa_mem_supported = true;
|
||||
compat_props_add(m->compat_props, hw_compat_5_0, hw_compat_5_0_len);
|
||||
compat_props_add(m->compat_props, pc_compat_5_0, pc_compat_5_0_len);
|
||||
|
@ -483,7 +485,6 @@ DEFINE_Q35_MACHINE(v5_0, "pc-q35-5.0", NULL,
|
|||
static void pc_q35_4_2_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_5_0_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_4_2, hw_compat_4_2_len);
|
||||
compat_props_add(m->compat_props, pc_compat_4_2, pc_compat_4_2_len);
|
||||
}
|
||||
|
@ -494,7 +495,6 @@ DEFINE_Q35_MACHINE(v4_2, "pc-q35-4.2", NULL,
|
|||
static void pc_q35_4_1_machine_options(MachineClass *m)
|
||||
{
|
||||
pc_q35_4_2_machine_options(m);
|
||||
m->alias = NULL;
|
||||
compat_props_add(m->compat_props, hw_compat_4_1, hw_compat_4_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_4_1, pc_compat_4_1_len);
|
||||
}
|
||||
|
@ -506,7 +506,6 @@ static void pc_q35_4_0_1_machine_options(MachineClass *m)
|
|||
{
|
||||
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
|
||||
pc_q35_4_1_machine_options(m);
|
||||
m->alias = NULL;
|
||||
pcmc->default_cpu_version = CPU_VERSION_LEGACY;
|
||||
/*
|
||||
* This is the default machine for the 4.0-stable branch. It is basically
|
||||
|
@ -524,7 +523,6 @@ static void pc_q35_4_0_machine_options(MachineClass *m)
|
|||
{
|
||||
pc_q35_4_0_1_machine_options(m);
|
||||
m->default_kernel_irqchip_split = true;
|
||||
m->alias = NULL;
|
||||
/* Compat props are applied by the 4.0.1 machine */
|
||||
}
|
||||
|
||||
|
@ -538,7 +536,6 @@ static void pc_q35_3_1_machine_options(MachineClass *m)
|
|||
pc_q35_4_0_machine_options(m);
|
||||
m->default_kernel_irqchip_split = false;
|
||||
m->smbus_no_migration_support = true;
|
||||
m->alias = NULL;
|
||||
pcmc->pvh_enabled = false;
|
||||
compat_props_add(m->compat_props, hw_compat_3_1, hw_compat_3_1_len);
|
||||
compat_props_add(m->compat_props, pc_compat_3_1, pc_compat_3_1_len);
|
||||
|
|
|
@ -52,6 +52,7 @@
|
|||
#include "hw/nvram/fw_cfg.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "hw/acpi/acpi_aml_interface.h"
|
||||
#include "trace.h"
|
||||
|
||||
/*****************************************************************************/
|
||||
/* ICH9 LPC PCI to ISA bridge */
|
||||
|
@ -162,6 +163,7 @@ static void ich9_cc_write(void *opaque, hwaddr addr,
|
|||
{
|
||||
ICH9LPCState *lpc = (ICH9LPCState *)opaque;
|
||||
|
||||
trace_ich9_cc_write(addr, val, len);
|
||||
ich9_cc_addr_len(&addr, &len);
|
||||
memcpy(lpc->chip_config + addr, &val, len);
|
||||
pci_bus_fire_intx_routing_notifier(pci_get_bus(&lpc->d));
|
||||
|
@ -177,6 +179,7 @@ static uint64_t ich9_cc_read(void *opaque, hwaddr addr,
|
|||
uint32_t val = 0;
|
||||
ich9_cc_addr_len(&addr, &len);
|
||||
memcpy(&val, lpc->chip_config + addr, len);
|
||||
trace_ich9_cc_read(addr, val, len);
|
||||
return val;
|
||||
}
|
||||
|
||||
|
@ -789,7 +792,7 @@ static const VMStateDescription vmstate_ich9_lpc = {
|
|||
};
|
||||
|
||||
static Property ich9_lpc_properties[] = {
|
||||
DEFINE_PROP_BOOL("noreboot", ICH9LPCState, pin_strap.spkr_hi, true),
|
||||
DEFINE_PROP_BOOL("noreboot", ICH9LPCState, pin_strap.spkr_hi, false),
|
||||
DEFINE_PROP_BOOL("smm-compat", ICH9LPCState, pm.smm_compat, false),
|
||||
DEFINE_PROP_BIT64("x-smi-broadcast", ICH9LPCState, smi_host_features,
|
||||
ICH9_LPC_SMI_F_BROADCAST_BIT, true),
|
||||
|
|
|
@ -21,3 +21,7 @@ via_pm_io_read(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%
|
|||
via_pm_io_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x"
|
||||
via_superio_read(uint8_t addr, uint8_t val) "addr 0x%x val 0x%x"
|
||||
via_superio_write(uint8_t addr, uint32_t val) "addr 0x%x val 0x%x"
|
||||
|
||||
# lpc_ich9.c
|
||||
ich9_cc_write(uint64_t addr, uint64_t val, unsigned len) "addr=0x%"PRIx64 " val=0x%"PRIx64 " len=%u"
|
||||
ich9_cc_read(uint64_t addr, uint64_t val, unsigned len) "addr=0x%"PRIx64 " val=0x%"PRIx64 " len=%u"
|
||||
|
|
|
@ -346,10 +346,17 @@ type_init(virt_machine_register_types)
|
|||
} \
|
||||
type_init(machvirt_machine_##major##_##minor##_init);
|
||||
|
||||
static void virt_machine_7_2_options(MachineClass *mc)
|
||||
static void virt_machine_8_0_options(MachineClass *mc)
|
||||
{
|
||||
}
|
||||
DEFINE_VIRT_MACHINE(7, 2, true)
|
||||
DEFINE_VIRT_MACHINE(8, 0, true)
|
||||
|
||||
static void virt_machine_7_2_options(MachineClass *mc)
|
||||
{
|
||||
virt_machine_8_0_options(mc);
|
||||
compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
|
||||
}
|
||||
DEFINE_VIRT_MACHINE(7, 2, false)
|
||||
|
||||
static void virt_machine_7_1_options(MachineClass *mc)
|
||||
{
|
||||
|
|
|
@ -295,6 +295,17 @@ static void build_dvsecs(CXLType3Dev *ct3d)
|
|||
cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
|
||||
GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
|
||||
GPF_DEVICE_DVSEC_REVID, dvsec);
|
||||
|
||||
dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
|
||||
.cap = 0x26, /* 68B, IO, Mem, non-MLD */
|
||||
.ctrl = 0x02, /* IO always enabled */
|
||||
.status = 0x26, /* same as capabilities */
|
||||
.rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
|
||||
};
|
||||
cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
|
||||
PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
|
||||
PCIE_FLEXBUS_PORT_DVSEC,
|
||||
PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
|
||||
}
|
||||
|
||||
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
|
||||
|
|
|
@ -217,7 +217,6 @@ static void cxl_dsp_class_init(ObjectClass *oc, void *data)
|
|||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
|
||||
|
||||
k->is_bridge = true;
|
||||
k->config_write = cxl_dsp_config_write;
|
||||
k->realize = cxl_dsp_realize;
|
||||
k->exit = cxl_dsp_exitfn;
|
||||
|
|
|
@ -375,7 +375,6 @@ static void cxl_upstream_class_init(ObjectClass *oc, void *data)
|
|||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
|
||||
|
||||
k->is_bridge = true;
|
||||
k->config_write = cxl_usp_write_config;
|
||||
k->config_read = cxl_usp_read_config;
|
||||
k->realize = cxl_usp_realize;
|
||||
|
|
|
@ -1,164 +0,0 @@
|
|||
/*
|
||||
* QEMU DEC 21154 PCI bridge
|
||||
*
|
||||
* Copyright (c) 2006-2007 Fabrice Bellard
|
||||
* Copyright (c) 2007 Jocelyn Mayer
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "dec.h"
|
||||
#include "hw/sysbus.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/module.h"
|
||||
#include "hw/pci/pci.h"
|
||||
#include "hw/pci/pci_host.h"
|
||||
#include "hw/pci/pci_bridge.h"
|
||||
#include "hw/pci/pci_bus.h"
|
||||
#include "qom/object.h"
|
||||
|
||||
OBJECT_DECLARE_SIMPLE_TYPE(DECState, DEC_21154)
|
||||
|
||||
struct DECState {
|
||||
PCIHostState parent_obj;
|
||||
};
|
||||
|
||||
static int dec_map_irq(PCIDevice *pci_dev, int irq_num)
|
||||
{
|
||||
return irq_num;
|
||||
}
|
||||
|
||||
static void dec_pci_bridge_realize(PCIDevice *pci_dev, Error **errp)
|
||||
{
|
||||
pci_bridge_initfn(pci_dev, TYPE_PCI_BUS);
|
||||
}
|
||||
|
||||
static void dec_21154_pci_bridge_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
|
||||
|
||||
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
|
||||
k->realize = dec_pci_bridge_realize;
|
||||
k->exit = pci_bridge_exitfn;
|
||||
k->vendor_id = PCI_VENDOR_ID_DEC;
|
||||
k->device_id = PCI_DEVICE_ID_DEC_21154;
|
||||
k->config_write = pci_bridge_write_config;
|
||||
k->is_bridge = true;
|
||||
dc->desc = "DEC 21154 PCI-PCI bridge";
|
||||
dc->reset = pci_bridge_reset;
|
||||
dc->vmsd = &vmstate_pci_device;
|
||||
}
|
||||
|
||||
static const TypeInfo dec_21154_pci_bridge_info = {
|
||||
.name = "dec-21154-p2p-bridge",
|
||||
.parent = TYPE_PCI_BRIDGE,
|
||||
.instance_size = sizeof(PCIBridge),
|
||||
.class_init = dec_21154_pci_bridge_class_init,
|
||||
.interfaces = (InterfaceInfo[]) {
|
||||
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
|
||||
{ },
|
||||
},
|
||||
};
|
||||
|
||||
PCIBus *pci_dec_21154_init(PCIBus *parent_bus, int devfn)
|
||||
{
|
||||
PCIDevice *dev;
|
||||
PCIBridge *br;
|
||||
|
||||
dev = pci_new_multifunction(devfn, false, "dec-21154-p2p-bridge");
|
||||
br = PCI_BRIDGE(dev);
|
||||
pci_bridge_map_irq(br, "DEC 21154 PCI-PCI bridge", dec_map_irq);
|
||||
pci_realize_and_unref(dev, parent_bus, &error_fatal);
|
||||
return pci_bridge_get_sec_bus(br);
|
||||
}
|
||||
|
||||
static void pci_dec_21154_device_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
PCIHostState *phb;
|
||||
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
|
||||
|
||||
phb = PCI_HOST_BRIDGE(dev);
|
||||
|
||||
memory_region_init_io(&phb->conf_mem, OBJECT(dev), &pci_host_conf_le_ops,
|
||||
dev, "pci-conf-idx", 0x1000);
|
||||
memory_region_init_io(&phb->data_mem, OBJECT(dev), &pci_host_data_le_ops,
|
||||
dev, "pci-data-idx", 0x1000);
|
||||
sysbus_init_mmio(sbd, &phb->conf_mem);
|
||||
sysbus_init_mmio(sbd, &phb->data_mem);
|
||||
}
|
||||
|
||||
static void dec_21154_pci_host_realize(PCIDevice *d, Error **errp)
|
||||
{
|
||||
/* PCI2PCI bridge same values as PearPC - check this */
|
||||
}
|
||||
|
||||
static void dec_21154_pci_host_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
|
||||
k->realize = dec_21154_pci_host_realize;
|
||||
k->vendor_id = PCI_VENDOR_ID_DEC;
|
||||
k->device_id = PCI_DEVICE_ID_DEC_21154;
|
||||
k->revision = 0x02;
|
||||
k->class_id = PCI_CLASS_BRIDGE_PCI;
|
||||
k->is_bridge = true;
|
||||
/*
|
||||
* PCI-facing part of the host bridge, not usable without the
|
||||
* host-facing part, which can't be device_add'ed, yet.
|
||||
*/
|
||||
dc->user_creatable = false;
|
||||
}
|
||||
|
||||
static const TypeInfo dec_21154_pci_host_info = {
|
||||
.name = "dec-21154",
|
||||
.parent = TYPE_PCI_DEVICE,
|
||||
.instance_size = sizeof(PCIDevice),
|
||||
.class_init = dec_21154_pci_host_class_init,
|
||||
.interfaces = (InterfaceInfo[]) {
|
||||
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
|
||||
{ },
|
||||
},
|
||||
};
|
||||
|
||||
static void pci_dec_21154_device_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
dc->realize = pci_dec_21154_device_realize;
|
||||
}
|
||||
|
||||
static const TypeInfo pci_dec_21154_device_info = {
|
||||
.name = TYPE_DEC_21154,
|
||||
.parent = TYPE_PCI_HOST_BRIDGE,
|
||||
.instance_size = sizeof(DECState),
|
||||
.class_init = pci_dec_21154_device_class_init,
|
||||
};
|
||||
|
||||
static void dec_register_types(void)
|
||||
{
|
||||
type_register_static(&pci_dec_21154_device_info);
|
||||
type_register_static(&dec_21154_pci_host_info);
|
||||
type_register_static(&dec_21154_pci_bridge_info);
|
||||
}
|
||||
|
||||
type_init(dec_register_types)
|
|
@ -1,9 +0,0 @@
|
|||
#ifndef HW_PCI_BRIDGE_DEC_H
|
||||
#define HW_PCI_BRIDGE_DEC_H
|
||||
|
||||
|
||||
#define TYPE_DEC_21154 "dec-21154-sysbus"
|
||||
|
||||
PCIBus *pci_dec_21154_init(PCIBus *parent_bus, int devfn);
|
||||
|
||||
#endif
|
|
@ -92,7 +92,6 @@ static void i82801b11_bridge_class_init(ObjectClass *klass, void *data)
|
|||
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
k->is_bridge = true;
|
||||
k->vendor_id = PCI_VENDOR_ID_INTEL;
|
||||
k->device_id = PCI_DEVICE_ID_INTEL_82801BA_11;
|
||||
k->revision = ICH9_D2P_A2_REVISION;
|
||||
|
|
|
@ -8,8 +8,6 @@ pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c'),
|
|||
pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c'))
|
||||
pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c', 'cxl_upstream.c', 'cxl_downstream.c'))
|
||||
|
||||
# NewWorld PowerMac
|
||||
pci_ss.add(when: 'CONFIG_DEC_PCI', if_true: files('dec.c'))
|
||||
# Sun4u
|
||||
pci_ss.add(when: 'CONFIG_SIMBA', if_true: files('simba.c'))
|
||||
|
||||
|
|
|
@ -254,7 +254,6 @@ static void pci_bridge_dev_class_init(ObjectClass *klass, void *data)
|
|||
k->vendor_id = PCI_VENDOR_ID_REDHAT;
|
||||
k->device_id = PCI_DEVICE_ID_REDHAT_BRIDGE;
|
||||
k->class_id = PCI_CLASS_BRIDGE_PCI;
|
||||
k->is_bridge = true;
|
||||
dc->desc = "Standard PCI Bridge";
|
||||
dc->reset = qdev_pci_bridge_dev_reset;
|
||||
device_class_set_props(dc, pci_bridge_dev_properties);
|
||||
|
|
|
@ -145,7 +145,6 @@ static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data)
|
|||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
|
||||
|
||||
k->is_bridge = true;
|
||||
k->vendor_id = PCI_VENDOR_ID_REDHAT;
|
||||
k->device_id = PCI_DEVICE_ID_REDHAT_PCIE_BRIDGE;
|
||||
k->realize = pcie_pci_bridge_realize;
|
||||
|
|
|
@ -174,7 +174,6 @@ static void rp_class_init(ObjectClass *klass, void *data)
|
|||
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
|
||||
ResettableClass *rc = RESETTABLE_CLASS(klass);
|
||||
|
||||
k->is_bridge = true;
|
||||
k->config_write = rp_write_config;
|
||||
k->realize = rp_realize;
|
||||
k->exit = rp_exit;
|
||||
|
|
|
@ -77,7 +77,6 @@ static void simba_pci_bridge_class_init(ObjectClass *klass, void *data)
|
|||
k->device_id = PCI_DEVICE_ID_SUN_SIMBA;
|
||||
k->revision = 0x11;
|
||||
k->config_write = pci_bridge_write_config;
|
||||
k->is_bridge = true;
|
||||
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
|
||||
dc->reset = pci_bridge_reset;
|
||||
dc->vmsd = &vmstate_pci_device;
|
||||
|
|
|
@ -159,7 +159,6 @@ static void xio3130_downstream_class_init(ObjectClass *klass, void *data)
|
|||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
|
||||
|
||||
k->is_bridge = true;
|
||||
k->config_write = xio3130_downstream_write_config;
|
||||
k->realize = xio3130_downstream_realize;
|
||||
k->exit = xio3130_downstream_exitfn;
|
||||
|
|
|
@ -128,7 +128,6 @@ static void xio3130_upstream_class_init(ObjectClass *klass, void *data)
|
|||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
|
||||
|
||||
k->is_bridge = true;
|
||||
k->config_write = xio3130_upstream_write_config;
|
||||
k->realize = xio3130_upstream_realize;
|
||||
k->exit = xio3130_upstream_exitfn;
|
||||
|
|
|
@ -600,7 +600,6 @@ static void designware_pcie_root_class_init(ObjectClass *klass, void *data)
|
|||
k->device_id = 0xABCD;
|
||||
k->revision = 0;
|
||||
k->class_id = PCI_CLASS_BRIDGE_PCI;
|
||||
k->is_bridge = true;
|
||||
k->exit = pci_bridge_exitfn;
|
||||
k->realize = designware_pcie_root_realize;
|
||||
k->config_read = designware_pcie_root_config_read;
|
||||
|
|
|
@ -128,11 +128,10 @@ static void pci_unin_main_realize(DeviceState *dev, Error **errp)
|
|||
|
||||
pci_create_simple(h->bus, PCI_DEVFN(11, 0), "uni-north-pci");
|
||||
|
||||
/* DEC 21154 bridge */
|
||||
#if 0
|
||||
/* XXX: not activated as PPC BIOS doesn't handle multiple buses properly */
|
||||
pci_create_simple(h->bus, PCI_DEVFN(12, 0), "dec-21154");
|
||||
#endif
|
||||
/*
|
||||
* DEC 21154 bridge was unused for many years, this comment is
|
||||
* a placeholder for whoever wishes to resurrect it
|
||||
*/
|
||||
}
|
||||
|
||||
static void pci_unin_main_init(Object *obj)
|
||||
|
|
|
@ -298,7 +298,6 @@ static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data)
|
|||
k->device_id = 0x7021;
|
||||
k->revision = 0;
|
||||
k->class_id = PCI_CLASS_BRIDGE_HOST;
|
||||
k->is_bridge = true;
|
||||
k->realize = xilinx_pcie_root_realize;
|
||||
k->exit = pci_bridge_exitfn;
|
||||
dc->reset = pci_bridge_reset;
|
||||
|
|
hw/pci/pci.c
@ -573,7 +573,7 @@ void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
|
|||
for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
|
||||
PCIDevice *dev = bus->devices[i];
|
||||
|
||||
if (dev && PCI_DEVICE_GET_CLASS(dev)->is_bridge) {
|
||||
if (dev && IS_PCI_BRIDGE(dev)) {
|
||||
*min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
|
||||
*max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
|
||||
}
|
||||
|
@ -589,7 +589,6 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
|
|||
const VMStateField *field)
|
||||
{
|
||||
PCIDevice *s = container_of(pv, PCIDevice, config);
|
||||
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(s);
|
||||
uint8_t *config;
|
||||
int i;
|
||||
|
||||
|
@ -611,9 +610,8 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
|
|||
memcpy(s->config, config, size);
|
||||
|
||||
pci_update_mappings(s);
|
||||
if (pc->is_bridge) {
|
||||
PCIBridge *b = PCI_BRIDGE(s);
|
||||
pci_bridge_update_mappings(b);
|
||||
if (IS_PCI_BRIDGE(s)) {
|
||||
pci_bridge_update_mappings(PCI_BRIDGE(s));
|
||||
}
|
||||
|
||||
memory_region_set_enabled(&s->bus_master_enable_region,
|
||||
|
@ -1087,9 +1085,10 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
|
|||
Error *local_err = NULL;
|
||||
DeviceState *dev = DEVICE(pci_dev);
|
||||
PCIBus *bus = pci_get_bus(pci_dev);
|
||||
bool is_bridge = IS_PCI_BRIDGE(pci_dev);
|
||||
|
||||
/* Only pci bridges can be attached to extra PCI root buses */
|
||||
if (pci_bus_is_root(bus) && bus->parent_dev && !pc->is_bridge) {
|
||||
if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
|
||||
error_setg(errp,
|
||||
"PCI: Only PCI/PCIe bridges can be plugged into %s",
|
||||
bus->parent_dev->name);
|
||||
|
@ -1151,7 +1150,7 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
|
|||
pci_config_set_revision(pci_dev->config, pc->revision);
|
||||
pci_config_set_class(pci_dev->config, pc->class_id);
|
||||
|
||||
if (!pc->is_bridge) {
|
||||
if (!is_bridge) {
|
||||
if (pc->subsystem_vendor_id || pc->subsystem_id) {
|
||||
pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
|
||||
pc->subsystem_vendor_id);
|
||||
|
@ -1168,7 +1167,7 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
|
|||
pci_init_cmask(pci_dev);
|
||||
pci_init_wmask(pci_dev);
|
||||
pci_init_w1cmask(pci_dev);
|
||||
if (pc->is_bridge) {
|
||||
if (is_bridge) {
|
||||
pci_init_mask_bridge(pci_dev);
|
||||
}
|
||||
pci_init_multifunction(bus, pci_dev, &local_err);
|
||||
|
@ -1916,7 +1915,7 @@ static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
|
|||
for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
|
||||
PCIDevice *dev = bus->devices[i];
|
||||
|
||||
if (dev && PCI_DEVICE_GET_CLASS(dev)->is_bridge) {
|
||||
if (dev && IS_PCI_BRIDGE(dev)) {
|
||||
if (pci_secondary_bus_in_range(dev, bus_num)) {
|
||||
return true;
|
||||
}
|
||||
|
@ -2623,7 +2622,6 @@ void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque)
|
|||
static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
|
||||
{
|
||||
Range *range = opaque;
|
||||
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
|
||||
uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
|
||||
int i;
|
||||
|
||||
|
@ -2631,7 +2629,7 @@ static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
|
|||
return;
|
||||
}
|
||||
|
||||
if (pc->is_bridge) {
|
||||
if (IS_PCI_BRIDGE(dev)) {
|
||||
pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
|
||||
pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
|
||||
|
||||
|
|
|
@ -4734,14 +4734,25 @@ static void spapr_machine_latest_class_options(MachineClass *mc)
|
|||
type_init(spapr_machine_register_##suffix)
|
||||
|
||||
/*
|
||||
* pseries-7.2
|
||||
* pseries-8.0
|
||||
*/
|
||||
static void spapr_machine_7_2_class_options(MachineClass *mc)
|
||||
static void spapr_machine_8_0_class_options(MachineClass *mc)
|
||||
{
|
||||
/* Defaults for the latest behaviour inherited from the base class */
|
||||
}
|
||||
|
||||
DEFINE_SPAPR_MACHINE(7_2, "7.2", true);
|
||||
DEFINE_SPAPR_MACHINE(8_0, "8.0", true);
|
||||
|
||||
/*
|
||||
* pseries-7.2
|
||||
*/
|
||||
static void spapr_machine_7_2_class_options(MachineClass *mc)
|
||||
{
|
||||
spapr_machine_8_0_class_options(mc);
|
||||
compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
|
||||
}
|
||||
|
||||
DEFINE_SPAPR_MACHINE(7_2, "7.2", false);
|
||||
|
||||
/*
|
||||
* pseries-7.1
|
||||
|
|
|
@ -1361,7 +1361,6 @@ static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev,
|
|||
{
|
||||
int offset;
|
||||
g_autofree gchar *nodename = spapr_pci_fw_dev_name(dev);
|
||||
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev);
|
||||
ResourceProps rp;
|
||||
SpaprDrc *drc = drc_from_dev(sphb, dev);
|
||||
uint32_t vendor_id = pci_default_read_config(dev, PCI_VENDOR_ID, 2);
|
||||
|
@ -1446,7 +1445,7 @@ static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev,
|
|||
|
||||
spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb);
|
||||
|
||||
if (!pc->is_bridge) {
|
||||
if (!IS_PCI_BRIDGE(dev)) {
|
||||
/* Properties only for non-bridges */
|
||||
uint32_t min_grant = pci_default_read_config(dev, PCI_MIN_GNT, 1);
|
||||
uint32_t max_latency = pci_default_read_config(dev, PCI_MAX_LAT, 1);
|
||||
|
@ -1544,7 +1543,6 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler,
|
|||
{
|
||||
SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
|
||||
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
|
||||
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev);
|
||||
SpaprDrc *drc = drc_from_dev(phb, pdev);
|
||||
PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
|
||||
uint32_t slotnr = PCI_SLOT(pdev->devfn);
|
||||
|
@ -1560,7 +1558,7 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler,
|
|||
}
|
||||
}
|
||||
|
||||
if (pc->is_bridge) {
|
||||
if (IS_PCI_BRIDGE(plugged_dev)) {
|
||||
if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) {
|
||||
return;
|
||||
}
|
||||
|
@ -1589,7 +1587,6 @@ static void spapr_pci_plug(HotplugHandler *plug_handler,
|
|||
{
|
||||
SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
|
||||
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
|
||||
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev);
|
||||
SpaprDrc *drc = drc_from_dev(phb, pdev);
|
||||
uint32_t slotnr = PCI_SLOT(pdev->devfn);
|
||||
|
||||
|
@ -1603,7 +1600,7 @@ static void spapr_pci_plug(HotplugHandler *plug_handler,
|
|||
|
||||
g_assert(drc);
|
||||
|
||||
if (pc->is_bridge) {
|
||||
if (IS_PCI_BRIDGE(plugged_dev)) {
|
||||
spapr_pci_bridge_plug(phb, PCI_BRIDGE(plugged_dev));
|
||||
}
|
||||
|
||||
|
@ -1646,7 +1643,6 @@ static void spapr_pci_bridge_unplug(SpaprPhbState *phb,
|
|||
static void spapr_pci_unplug(HotplugHandler *plug_handler,
|
||||
DeviceState *plugged_dev, Error **errp)
|
||||
{
|
||||
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev);
|
||||
SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
|
||||
|
||||
/* some version guests do not wait for completion of a device
|
||||
|
@ -1661,7 +1657,7 @@ static void spapr_pci_unplug(HotplugHandler *plug_handler,
|
|||
*/
|
||||
pci_device_reset(PCI_DEVICE(plugged_dev));
|
||||
|
||||
if (pc->is_bridge) {
|
||||
if (IS_PCI_BRIDGE(plugged_dev)) {
|
||||
spapr_pci_bridge_unplug(phb, PCI_BRIDGE(plugged_dev));
|
||||
return;
|
||||
}
|
||||
|
@ -1686,7 +1682,6 @@ static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
|
|||
g_assert(drc->dev == plugged_dev);
|
||||
|
||||
if (!spapr_drc_unplug_requested(drc)) {
|
||||
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev);
|
||||
uint32_t slotnr = PCI_SLOT(pdev->devfn);
|
||||
SpaprDrc *func_drc;
|
||||
SpaprDrcClass *func_drck;
|
||||
|
@ -1694,7 +1689,7 @@ static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
|
|||
int i;
|
||||
uint8_t chassis = chassis_from_bus(pci_get_bus(pdev));
|
||||
|
||||
if (pc->is_bridge) {
|
||||
if (IS_PCI_BRIDGE(plugged_dev)) {
|
||||
error_setg(errp, "PCI: Hot unplug of PCI bridges not supported");
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -823,14 +823,26 @@ bool css_migration_enabled(void)
|
|||
} \
|
||||
type_init(ccw_machine_register_##suffix)
|
||||
|
||||
static void ccw_machine_8_0_instance_options(MachineState *machine)
|
||||
{
|
||||
}
|
||||
|
||||
static void ccw_machine_8_0_class_options(MachineClass *mc)
|
||||
{
|
||||
}
|
||||
DEFINE_CCW_MACHINE(8_0, "8.0", true);
|
||||
|
||||
static void ccw_machine_7_2_instance_options(MachineState *machine)
|
||||
{
|
||||
ccw_machine_8_0_instance_options(machine);
|
||||
}
|
||||
|
||||
static void ccw_machine_7_2_class_options(MachineClass *mc)
|
||||
{
|
||||
ccw_machine_8_0_class_options(mc);
|
||||
compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
|
||||
}
|
||||
DEFINE_CCW_MACHINE(7_2, "7.2", true);
|
||||
DEFINE_CCW_MACHINE(7_2, "7.2", false);
|
||||
|
||||
static void ccw_machine_7_1_instance_options(MachineState *machine)
|
||||
{
|
||||
|
|
|
@ -85,3 +85,8 @@ config VHOST_USER_GPIO
|
|||
bool
|
||||
default y
|
||||
depends on VIRTIO && VHOST_USER
|
||||
|
||||
config VHOST_VDPA_DEV
|
||||
bool
|
||||
default y
|
||||
depends on VIRTIO && VHOST_VDPA && LINUX
|
||||
|
|
|
@ -3,34 +3,36 @@ softmmu_virtio_ss.add(files('virtio-bus.c'))
|
|||
softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c'))
|
||||
softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c'))
|
||||
|
||||
virtio_ss = ss.source_set()
|
||||
virtio_ss.add(files('virtio.c'))
|
||||
specific_virtio_ss = ss.source_set()
|
||||
specific_virtio_ss.add(files('virtio.c'))
|
||||
specific_virtio_ss.add(files('virtio-config-io.c', 'virtio-qmp.c'))
|
||||
|
||||
if have_vhost
|
||||
virtio_ss.add(files('vhost.c', 'vhost-backend.c', 'vhost-iova-tree.c'))
|
||||
specific_virtio_ss.add(files('vhost.c', 'vhost-backend.c', 'vhost-iova-tree.c'))
|
||||
if have_vhost_user
|
||||
virtio_ss.add(files('vhost-user.c'))
|
||||
specific_virtio_ss.add(files('vhost-user.c'))
|
||||
endif
|
||||
if have_vhost_vdpa
|
||||
virtio_ss.add(files('vhost-vdpa.c', 'vhost-shadow-virtqueue.c'))
|
||||
specific_virtio_ss.add(files('vhost-vdpa.c', 'vhost-shadow-virtqueue.c'))
|
||||
endif
|
||||
else
|
||||
softmmu_virtio_ss.add(files('vhost-stub.c'))
|
||||
endif
|
||||
|
||||
virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c', 'vhost-vsock-common.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c', 'vhost-vsock-common.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
|
||||
virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
|
||||
virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c', 'vhost-vsock-common.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c', 'vhost-vsock-common.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
|
||||
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev.c'))
|
||||
|
||||
virtio_pci_ss = ss.source_set()
|
||||
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))
|
||||
|
@ -56,12 +58,14 @@ virtio_pci_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-serial-pc
|
|||
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem-pci.c'))
|
||||
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu-pci.c'))
|
||||
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c'))
|
||||
virtio_pci_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev-pci.c'))
|
||||
|
||||
virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss)
|
||||
specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss)
|
||||
|
||||
specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: virtio_ss)
|
||||
softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss)
|
||||
softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c'))
|
||||
softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c'))
|
||||
|
||||
specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: specific_virtio_ss)
|
||||
|
|
|
@ -30,8 +30,8 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
|
|||
vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
|
||||
|
||||
# vhost-vdpa.c
|
||||
vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
|
||||
vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
|
||||
vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
|
||||
vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
|
||||
vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
|
||||
vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
|
||||
vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
|
||||
|
|
|
@@ -0,0 +1,102 @@
/*
 * Vhost Vdpa Device PCI Bindings
 *
 * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved.
 *
 * Authors:
 *   Longpeng <longpeng2@huawei.com>
 *
 * Largely based on the "vhost-user-blk-pci.c" and "vhost-user-blk.c"
 * implemented by:
 *   Changpeng Liu <changpeng.liu@intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include "hw/virtio/virtio.h"
#include "hw/virtio/vdpa-dev.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/virtio-pci.h"
#include "qom/object.h"


typedef struct VhostVdpaDevicePCI VhostVdpaDevicePCI;

#define TYPE_VHOST_VDPA_DEVICE_PCI "vhost-vdpa-device-pci-base"
DECLARE_INSTANCE_CHECKER(VhostVdpaDevicePCI, VHOST_VDPA_DEVICE_PCI,
                         TYPE_VHOST_VDPA_DEVICE_PCI)

struct VhostVdpaDevicePCI {
    VirtIOPCIProxy parent_obj;
    VhostVdpaDevice vdev;
};

static void vhost_vdpa_device_pci_instance_init(Object *obj)
{
    VhostVdpaDevicePCI *dev = VHOST_VDPA_DEVICE_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_VDPA_DEVICE);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex");
}

static Property vhost_vdpa_device_pci_properties[] = {
    DEFINE_PROP_END_OF_LIST(),
};

static int vhost_vdpa_device_pci_post_init(VhostVdpaDevice *v, Error **errp)
{
    VhostVdpaDevicePCI *dev = container_of(v, VhostVdpaDevicePCI, vdev);
    VirtIOPCIProxy *vpci_dev = &dev->parent_obj;

    vpci_dev->class_code = virtio_pci_get_class_id(v->vdev_id);
    vpci_dev->trans_devid = virtio_pci_get_trans_devid(v->vdev_id);
    /* one for config vector */
    vpci_dev->nvectors = v->num_queues + 1;

    return 0;
}

static void
vhost_vdpa_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VhostVdpaDevicePCI *dev = VHOST_VDPA_DEVICE_PCI(vpci_dev);

    dev->vdev.post_init = vhost_vdpa_device_pci_post_init;
    qdev_realize(DEVICE(&dev->vdev), BUS(&vpci_dev->bus), errp);
}

static void vhost_vdpa_device_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, vhost_vdpa_device_pci_properties);
    k->realize = vhost_vdpa_device_pci_realize;
}

static const VirtioPCIDeviceTypeInfo vhost_vdpa_device_pci_info = {
    .base_name = TYPE_VHOST_VDPA_DEVICE_PCI,
    .generic_name = "vhost-vdpa-device-pci",
    .transitional_name = "vhost-vdpa-device-pci-transitional",
    .non_transitional_name = "vhost-vdpa-device-pci-non-transitional",
    .instance_size = sizeof(VhostVdpaDevicePCI),
    .instance_init = vhost_vdpa_device_pci_instance_init,
    .class_init = vhost_vdpa_device_pci_class_init,
};

static void vhost_vdpa_device_pci_register(void)
{
    virtio_pci_types_register(&vhost_vdpa_device_pci_info);
}

type_init(vhost_vdpa_device_pci_register);
@ -0,0 +1,377 @@
|
|||
/*
|
||||
* Vhost Vdpa Device
|
||||
*
|
||||
* Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved.
|
||||
*
|
||||
* Authors:
|
||||
* Longpeng <longpeng2@huawei.com>
|
||||
*
|
||||
* Largely based on the "vhost-user-blk-pci.c" and "vhost-user-blk.c"
|
||||
* implemented by:
|
||||
* Changpeng Liu <changpeng.liu@intel.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
|
||||
* See the COPYING.LIB file in the top-level directory.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include <sys/ioctl.h>
|
||||
#include <linux/vhost.h>
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "hw/qdev-core.h"
|
||||
#include "hw/qdev-properties.h"
|
||||
#include "hw/qdev-properties-system.h"
|
||||
#include "hw/virtio/vhost.h"
|
||||
#include "hw/virtio/virtio.h"
|
||||
#include "hw/virtio/virtio-bus.h"
|
||||
#include "hw/virtio/virtio-access.h"
|
||||
#include "hw/virtio/vdpa-dev.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "sysemu/runstate.h"
|
||||
|
||||
static void
|
||||
vhost_vdpa_device_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
|
||||
{
|
||||
/* Nothing to do */
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
vhost_vdpa_device_get_u32(int fd, unsigned long int cmd, Error **errp)
|
||||
{
|
||||
uint32_t val = (uint32_t)-1;
|
||||
|
||||
if (ioctl(fd, cmd, &val) < 0) {
|
||||
error_setg(errp, "vhost-vdpa-device: cmd 0x%lx failed: %s",
|
||||
cmd, strerror(errno));
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
|
||||
VhostVdpaDevice *v = VHOST_VDPA_DEVICE(vdev);
|
||||
uint16_t max_queue_size;
|
||||
struct vhost_virtqueue *vqs;
|
||||
int i, ret;
|
||||
|
||||
if (!v->vhostdev) {
|
||||
error_setg(errp, "vhost-vdpa-device: vhostdev are missing");
|
||||
return;
|
||||
}
|
||||
|
||||
v->vhostfd = qemu_open(v->vhostdev, O_RDWR, errp);
|
||||
if (*errp) {
|
||||
return;
|
||||
}
|
||||
v->vdpa.device_fd = v->vhostfd;
|
||||
|
||||
v->vdev_id = vhost_vdpa_device_get_u32(v->vhostfd,
|
||||
VHOST_VDPA_GET_DEVICE_ID, errp);
|
||||
if (*errp) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
max_queue_size = vhost_vdpa_device_get_u32(v->vhostfd,
|
||||
VHOST_VDPA_GET_VRING_NUM, errp);
|
||||
if (*errp) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (v->queue_size > max_queue_size) {
|
||||
error_setg(errp, "vhost-vdpa-device: invalid queue_size: %u (max:%u)",
|
||||
v->queue_size, max_queue_size);
|
||||
goto out;
|
||||
} else if (!v->queue_size) {
|
||||
v->queue_size = max_queue_size;
|
||||
}
|
||||
|
||||
v->num_queues = vhost_vdpa_device_get_u32(v->vhostfd,
|
||||
VHOST_VDPA_GET_VQS_COUNT, errp);
|
||||
if (*errp) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!v->num_queues || v->num_queues > VIRTIO_QUEUE_MAX) {
|
||||
error_setg(errp, "invalid number of virtqueues: %u (max:%u)",
|
||||
v->num_queues, VIRTIO_QUEUE_MAX);
|
||||
goto out;
|
||||
}
|
||||
|
||||
v->dev.nvqs = v->num_queues;
|
||||
vqs = g_new0(struct vhost_virtqueue, v->dev.nvqs);
|
||||
v->dev.vqs = vqs;
|
||||
v->dev.vq_index = 0;
|
||||
v->dev.vq_index_end = v->dev.nvqs;
|
||||
v->dev.backend_features = 0;
|
||||
v->started = false;
|
||||
|
||||
ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "vhost-vdpa-device: vhost initialization failed: %s",
|
||||
strerror(-ret));
|
||||
goto free_vqs;
|
||||
}
|
||||
|
||||
v->config_size = vhost_vdpa_device_get_u32(v->vhostfd,
|
||||
VHOST_VDPA_GET_CONFIG_SIZE,
|
||||
errp);
|
||||
if (*errp) {
|
||||
goto vhost_cleanup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Invoke .post_init() to initialize the transport-specific fields
|
||||
* before calling virtio_init().
|
||||
*/
|
||||
if (v->post_init && v->post_init(v, errp) < 0) {
|
||||
goto vhost_cleanup;
|
||||
}
|
||||
|
||||
v->config = g_malloc0(v->config_size);
|
||||
|
||||
ret = vhost_dev_get_config(&v->dev, v->config, v->config_size, NULL);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "vhost-vdpa-device: get config failed");
|
||||
goto free_config;
|
||||
}
|
||||
|
||||
virtio_init(vdev, v->vdev_id, v->config_size);
|
||||
|
||||
v->virtqs = g_new0(VirtQueue *, v->dev.nvqs);
|
||||
for (i = 0; i < v->dev.nvqs; i++) {
|
||||
v->virtqs[i] = virtio_add_queue(vdev, v->queue_size,
|
||||
vhost_vdpa_device_dummy_handle_output);
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
free_config:
|
||||
g_free(v->config);
|
||||
vhost_cleanup:
|
||||
vhost_dev_cleanup(&v->dev);
|
||||
free_vqs:
|
||||
g_free(vqs);
|
||||
out:
|
||||
qemu_close(v->vhostfd);
|
||||
v->vhostfd = -1;
|
||||
}
|
||||
|
||||
static void vhost_vdpa_device_unrealize(DeviceState *dev)
|
||||
{
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
|
||||
int i;
|
||||
|
||||
virtio_set_status(vdev, 0);
|
||||
|
||||
for (i = 0; i < s->num_queues; i++) {
|
||||
virtio_delete_queue(s->virtqs[i]);
|
||||
}
|
||||
g_free(s->virtqs);
|
||||
virtio_cleanup(vdev);
|
||||
|
||||
g_free(s->config);
|
||||
g_free(s->dev.vqs);
|
||||
vhost_dev_cleanup(&s->dev);
|
||||
qemu_close(s->vhostfd);
|
||||
s->vhostfd = -1;
|
||||
}
|
||||
|
||||
static void
|
||||
vhost_vdpa_device_get_config(VirtIODevice *vdev, uint8_t *config)
|
||||
{
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
|
||||
|
||||
memcpy(config, s->config, s->config_size);
|
||||
}
|
||||
|
||||
static void
|
||||
vhost_vdpa_device_set_config(VirtIODevice *vdev, const uint8_t *config)
|
||||
{
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
|
||||
int ret;
|
||||
|
||||
ret = vhost_dev_set_config(&s->dev, s->config, 0, s->config_size,
|
||||
VHOST_SET_CONFIG_TYPE_MASTER);
|
||||
if (ret) {
|
||||
error_report("set device config space failed");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t vhost_vdpa_device_get_features(VirtIODevice *vdev,
|
||||
uint64_t features,
|
||||
Error **errp)
|
||||
{
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
|
||||
uint64_t backend_features = s->dev.features;
|
||||
|
||||
if (!virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM)) {
|
||||
virtio_clear_feature(&backend_features, VIRTIO_F_IOMMU_PLATFORM);
|
||||
}
|
||||
|
||||
return backend_features;
|
||||
}
|
||||
|
||||
static int vhost_vdpa_device_start(VirtIODevice *vdev, Error **errp)
|
||||
{
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
|
||||
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
|
||||
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
|
||||
int i, ret;
|
||||
|
||||
if (!k->set_guest_notifiers) {
|
||||
error_setg(errp, "binding does not support guest notifiers");
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
ret = vhost_dev_enable_notifiers(&s->dev, vdev);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Error enabling host notifiers");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Error binding guest notifier");
|
||||
goto err_host_notifiers;
|
||||
}
|
||||
|
||||
s->dev.acked_features = vdev->guest_features;
|
||||
|
||||
ret = vhost_dev_start(&s->dev, vdev, false);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Error starting vhost");
|
||||
goto err_guest_notifiers;
|
||||
}
|
||||
s->started = true;
|
||||
|
||||
/*
|
||||
* guest_notifier_mask/pending not used yet, so just unmask
|
||||
* everything here. virtio-pci will do the right thing by
|
||||
* enabling/disabling irqfd.
|
||||
*/
|
||||
for (i = 0; i < s->dev.nvqs; i++) {
|
||||
vhost_virtqueue_mask(&s->dev, vdev, i, false);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
err_guest_notifiers:
|
||||
k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
|
||||
err_host_notifiers:
|
||||
vhost_dev_disable_notifiers(&s->dev, vdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vhost_vdpa_device_stop(VirtIODevice *vdev)
|
||||
{
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
|
||||
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
|
||||
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
|
||||
int ret;
|
||||
|
||||
if (!s->started) {
|
||||
return;
|
||||
}
|
||||
s->started = false;
|
||||
|
||||
if (!k->set_guest_notifiers) {
|
||||
return;
|
||||
}
|
||||
|
||||
vhost_dev_stop(&s->dev, vdev, false);
|
||||
|
||||
ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
|
||||
if (ret < 0) {
|
||||
error_report("vhost guest notifier cleanup failed: %d", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
vhost_dev_disable_notifiers(&s->dev, vdev);
|
||||
}
|
||||
|
||||
static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status)
|
||||
{
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
|
||||
bool should_start = virtio_device_started(vdev, status);
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
if (!vdev->vm_running) {
|
||||
should_start = false;
|
||||
}
|
||||
|
||||
if (s->started == should_start) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (should_start) {
|
||||
ret = vhost_vdpa_device_start(vdev, &local_err);
|
||||
if (ret < 0) {
|
||||
error_reportf_err(local_err, "vhost-vdpa-device: start failed: ");
|
||||
}
|
||||
} else {
|
||||
vhost_vdpa_device_stop(vdev);
|
||||
}
|
||||
}
|
||||
|
||||
static Property vhost_vdpa_device_properties[] = {
|
||||
DEFINE_PROP_STRING("vhostdev", VhostVdpaDevice, vhostdev),
|
||||
DEFINE_PROP_UINT16("queue-size", VhostVdpaDevice, queue_size, 0),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static const VMStateDescription vmstate_vhost_vdpa_device = {
|
||||
.name = "vhost-vdpa-device",
|
||||
.unmigratable = 1,
|
||||
.minimum_version_id = 1,
|
||||
.version_id = 1,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_VIRTIO_DEVICE,
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
};
|
||||
|
||||
static void vhost_vdpa_device_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
|
||||
|
||||
device_class_set_props(dc, vhost_vdpa_device_properties);
|
||||
dc->desc = "VDPA-based generic device assignment";
|
||||
dc->vmsd = &vmstate_vhost_vdpa_device;
|
||||
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
|
||||
vdc->realize = vhost_vdpa_device_realize;
|
||||
vdc->unrealize = vhost_vdpa_device_unrealize;
|
||||
vdc->get_config = vhost_vdpa_device_get_config;
|
||||
vdc->set_config = vhost_vdpa_device_set_config;
|
||||
vdc->get_features = vhost_vdpa_device_get_features;
|
||||
vdc->set_status = vhost_vdpa_device_set_status;
|
||||
}
|
||||
|
||||
static void vhost_vdpa_device_instance_init(Object *obj)
|
||||
{
|
||||
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(obj);
|
||||
|
||||
device_add_bootindex_property(obj, &s->bootindex, "bootindex",
|
||||
NULL, DEVICE(obj));
|
||||
}
|
||||
|
||||
static const TypeInfo vhost_vdpa_device_info = {
|
||||
.name = TYPE_VHOST_VDPA_DEVICE,
|
||||
.parent = TYPE_VIRTIO_DEVICE,
|
||||
.instance_size = sizeof(VhostVdpaDevice),
|
||||
.class_init = vhost_vdpa_device_class_init,
|
||||
.instance_init = vhost_vdpa_device_instance_init,
|
||||
};
|
||||
|
||||
static void register_vhost_vdpa_device_type(void)
|
||||
{
|
||||
type_register_static(&vhost_vdpa_device_info);
|
||||
}
|
||||
|
||||
type_init(register_vhost_vdpa_device_type);
|
|
@@ -642,18 +642,21 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
 * @svq: Shadow Virtqueue
 * @vdev: VirtIO device
 * @vq: Virtqueue to shadow
 * @iova_tree: Tree to perform descriptors translations
 */
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq)
                     VirtQueue *vq, VhostIOVATree *iova_tree)
{
    size_t desc_size, driver_size, device_size;

    event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
    svq->next_guest_avail_elem = NULL;
    svq->shadow_avail_idx = 0;
    svq->shadow_used_idx = 0;
    svq->last_used_idx = 0;
    svq->vdev = vdev;
    svq->vq = vq;
    svq->iova_tree = iova_tree;

    svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
    driver_size = vhost_svq_driver_area_size(svq);

@@ -704,53 +707,25 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
    g_free(svq->desc_state);
    qemu_vfree(svq->vring.desc);
    qemu_vfree(svq->vring.used);
    event_notifier_set_handler(&svq->hdev_call, NULL);
}

/**
 * Creates vhost shadow virtqueue, and instructs the vhost device to use the
 * shadow methods and file descriptors.
 *
 * @iova_tree: Tree to perform descriptors translations
 * @ops: SVQ owner callbacks
 * @ops_opaque: ops opaque pointer
 *
 * Returns the new virtqueue or NULL.
 *
 * In case of error, reason is reported through error_report.
 */
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
                                    const VhostShadowVirtqueueOps *ops,
VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
                                    void *ops_opaque)
{
    g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_report("Couldn't create kick event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_report("Couldn't create call event notifier: %s (%d)",
                     g_strerror(errno), errno);
        goto err_init_hdev_call;
    }
    VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);

    event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
    event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
    svq->iova_tree = iova_tree;
    svq->ops = ops;
    svq->ops_opaque = ops_opaque;
    return g_steal_pointer(&svq);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return NULL;
    return svq;
}

/**

@@ -762,8 +737,5 @@ void vhost_svq_free(gpointer pvq)
{
    VhostShadowVirtqueue *vq = pvq;
    vhost_svq_stop(vq);
    event_notifier_cleanup(&vq->hdev_kick);
    event_notifier_set_handler(&vq->hdev_call, NULL);
    event_notifier_cleanup(&vq->hdev_call);
    g_free(vq);
}
@@ -126,11 +126,10 @@ size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);

void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq);
                     VirtQueue *vq, VhostIOVATree *iova_tree);
void vhost_svq_stop(VhostShadowVirtqueue *svq);

VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
                                    const VhostShadowVirtqueueOps *ops,
VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
                                    void *ops_opaque);

void vhost_svq_free(gpointer vq);
@@ -527,6 +527,11 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
        .hdr.size = sizeof(msg.payload.log),
    };

    /* Send only once with first queue pair */
    if (dev->vq_index != 0) {
        return 0;
    }

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }
@@ -72,22 +72,28 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
    return false;
}

int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                       void *vaddr, bool readonly)
/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",

@@ -98,18 +104,24 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
    return ret;
}

int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
@ -212,7 +224,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
|
|||
vaddr, section->readonly);
|
||||
|
||||
llsize = int128_sub(llend, int128_make64(iova));
|
||||
if (v->shadow_vqs_enabled) {
|
||||
if (v->shadow_data) {
|
||||
int r;
|
||||
|
||||
mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
|
||||
|
@ -229,8 +241,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
|
|||
}
|
||||
|
||||
vhost_vdpa_iotlb_batch_begin_once(v);
|
||||
ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
|
||||
vaddr, section->readonly);
|
||||
ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
|
||||
int128_get64(llsize), vaddr, section->readonly);
|
||||
if (ret) {
|
||||
error_report("vhost vdpa map fail!");
|
||||
goto fail_map;
|
||||
|
@ -239,7 +251,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
|
|||
return;
|
||||
|
||||
fail_map:
|
||||
if (v->shadow_vqs_enabled) {
|
||||
if (v->shadow_data) {
|
||||
vhost_iova_tree_remove(v->iova_tree, mem_region);
|
||||
}
|
||||
|
||||
|
@ -284,7 +296,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
|
|||
|
||||
llsize = int128_sub(llend, int128_make64(iova));
|
||||
|
||||
if (v->shadow_vqs_enabled) {
|
||||
if (v->shadow_data) {
|
||||
const DMAMap *result;
|
||||
const void *vaddr = memory_region_get_ram_ptr(section->mr) +
|
||||
section->offset_within_region +
|
||||
|
@ -303,7 +315,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
|
|||
vhost_iova_tree_remove(v->iova_tree, *result);
|
||||
}
|
||||
vhost_vdpa_iotlb_batch_begin_once(v);
|
||||
ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
|
||||
ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
|
||||
int128_get64(llsize));
|
||||
if (ret) {
|
||||
error_report("vhost_vdpa dma unmap error!");
|
||||
}
|
||||
|
@ -365,19 +378,6 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
|
||||
{
|
||||
int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
|
||||
&v->iova_range);
|
||||
if (ret != 0) {
|
||||
v->iova_range.first = 0;
|
||||
v->iova_range.last = UINT64_MAX;
|
||||
}
|
||||
|
||||
trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
|
||||
v->iova_range.last);
|
||||
}
|
||||
|
||||
/*
|
||||
* The use of this function is for requests that only need to be
|
||||
* applied once. Typically such request occurs at the beginning
|
||||
|
@ -402,45 +402,19 @@ static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
|
||||
Error **errp)
|
||||
static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
|
||||
{
|
||||
g_autoptr(GPtrArray) shadow_vqs = NULL;
|
||||
uint64_t dev_features, svq_features;
|
||||
int r;
|
||||
bool ok;
|
||||
|
||||
if (!v->shadow_vqs_enabled) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
r = vhost_vdpa_get_dev_features(hdev, &dev_features);
|
||||
if (r != 0) {
|
||||
error_setg_errno(errp, -r, "Can't get vdpa device features");
|
||||
return r;
|
||||
}
|
||||
|
||||
svq_features = dev_features;
|
||||
ok = vhost_svq_valid_features(svq_features, errp);
|
||||
if (unlikely(!ok)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
|
||||
for (unsigned n = 0; n < hdev->nvqs; ++n) {
|
||||
g_autoptr(VhostShadowVirtqueue) svq;
|
||||
VhostShadowVirtqueue *svq;
|
||||
|
||||
svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
|
||||
v->shadow_vq_ops_opaque);
|
||||
if (unlikely(!svq)) {
|
||||
error_setg(errp, "Cannot create svq %u", n);
|
||||
return -1;
|
||||
}
|
||||
g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
|
||||
svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
|
||||
g_ptr_array_add(shadow_vqs, svq);
|
||||
}
|
||||
|
||||
v->shadow_vqs = g_steal_pointer(&shadow_vqs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
|
||||
|
@ -465,12 +439,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
|
|||
dev->opaque = opaque ;
|
||||
v->listener = vhost_vdpa_memory_listener;
|
||||
v->msg_type = VHOST_IOTLB_MSG_V2;
|
||||
ret = vhost_vdpa_init_svq(dev, v, errp);
|
||||
if (ret) {
|
||||
goto err;
|
||||
}
|
||||
|
||||
vhost_vdpa_get_iova_range(v);
|
||||
vhost_vdpa_init_svq(dev, v);
|
||||
|
||||
if (!vhost_vdpa_first_dev(dev)) {
|
||||
return 0;
|
||||
|
@ -480,10 +449,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
|
|||
VIRTIO_CONFIG_S_DRIVER);
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
ram_block_discard_disable(false);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
|
||||
|
@ -580,10 +545,6 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
|
|||
struct vhost_vdpa *v = dev->opaque;
|
||||
size_t idx;
|
||||
|
||||
if (!v->shadow_vqs) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
|
||||
vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
|
||||
}
|
||||
|
@ -677,7 +638,8 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
|
|||
{
|
||||
uint64_t features;
|
||||
uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
|
||||
0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
|
||||
0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
|
||||
0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
|
||||
int r;
|
||||
|
||||
if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
|
||||
|
@ -864,11 +826,23 @@ static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
|
|||
const EventNotifier *event_notifier = &svq->hdev_kick;
|
||||
int r;
|
||||
|
||||
r = event_notifier_init(&svq->hdev_kick, 0);
|
||||
if (r != 0) {
|
||||
error_setg_errno(errp, -r, "Couldn't create kick event notifier");
|
||||
goto err_init_hdev_kick;
|
||||
}
|
||||
|
||||
r = event_notifier_init(&svq->hdev_call, 0);
|
||||
if (r != 0) {
|
||||
error_setg_errno(errp, -r, "Couldn't create call event notifier");
|
||||
goto err_init_hdev_call;
|
||||
}
|
||||
|
||||
file.fd = event_notifier_get_fd(event_notifier);
|
||||
r = vhost_vdpa_set_vring_dev_kick(dev, &file);
|
||||
if (unlikely(r != 0)) {
|
||||
error_setg_errno(errp, -r, "Can't set device kick fd");
|
||||
return r;
|
||||
goto err_init_set_dev_fd;
|
||||
}
|
||||
|
||||
event_notifier = &svq->hdev_call;
|
||||
|
@ -876,8 +850,18 @@ static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
|
|||
r = vhost_vdpa_set_vring_dev_call(dev, &file);
|
||||
if (unlikely(r != 0)) {
|
||||
error_setg_errno(errp, -r, "Can't set device call fd");
|
||||
goto err_init_set_dev_fd;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_init_set_dev_fd:
|
||||
event_notifier_set_handler(&svq->hdev_call, NULL);
|
||||
|
||||
err_init_hdev_call:
|
||||
event_notifier_cleanup(&svq->hdev_kick);
|
||||
|
||||
err_init_hdev_kick:
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -899,7 +883,7 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
|
|||
}
|
||||
|
||||
size = ROUND_UP(result->size, qemu_real_host_page_size());
|
||||
r = vhost_vdpa_dma_unmap(v, result->iova, size);
|
||||
r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
|
||||
if (unlikely(r < 0)) {
|
||||
error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
|
||||
return;
|
||||
|
@ -939,7 +923,8 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
|
|||
return false;
|
||||
}
|
||||
|
||||
r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
|
||||
r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
|
||||
needle->size + 1,
|
||||
(void *)(uintptr_t)needle->translated_addr,
|
||||
needle->perm == IOMMU_RO);
|
||||
if (unlikely(r != 0)) {
|
||||
|
@ -1029,7 +1014,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
|
|||
Error *err = NULL;
|
||||
unsigned i;
|
||||
|
||||
if (!v->shadow_vqs) {
|
||||
if (!v->shadow_vqs_enabled) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1045,7 +1030,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
|
|||
goto err;
|
||||
}
|
||||
|
||||
vhost_svq_start(svq, dev->vdev, vq);
|
||||
vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
|
||||
ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
|
||||
if (unlikely(!ok)) {
|
||||
goto err_map;
|
||||
|
@ -1082,13 +1067,16 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
|
|||
{
|
||||
struct vhost_vdpa *v = dev->opaque;
|
||||
|
||||
if (!v->shadow_vqs) {
|
||||
if (!v->shadow_vqs_enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
|
||||
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
|
||||
vhost_vdpa_svq_unmap_rings(dev, svq);
|
||||
|
||||
event_notifier_cleanup(&svq->hdev_kick);
|
||||
event_notifier_cleanup(&svq->hdev_call);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -20,6 +20,7 @@
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
@@ -106,6 +107,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
    }
}

static bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    /*
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
     * incremental memory mapping API via IOTLB API. For platform that
     * does not have IOMMU, there's no need to enable this feature
     * which may cause unnecessary IOTLB miss/update transactions.
     */
    if (vdev) {
        return virtio_bus_device_iommu_enabled(vdev) &&
            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
    } else {
        return false;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
@@ -137,8 +156,51 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
            continue;
        }

        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
        if (vhost_dev_has_iommu(dev)) {
            IOMMUTLBEntry iotlb;
            hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
            hwaddr phys, s, offset;

            while (used_size) {
                rcu_read_lock();
                iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                                      used_phys,
                                                      true,
                                                      MEMTXATTRS_UNSPECIFIED);
                rcu_read_unlock();

                if (!iotlb.target_as) {
                    qemu_log_mask(LOG_GUEST_ERROR, "translation "
                                  "failure for used_iova %"PRIx64"\n",
                                  used_phys);
                    return -EINVAL;
                }

                offset = used_phys & iotlb.addr_mask;
                phys = iotlb.translated_addr + offset;

                /*
                 * Distance from start of used ring until last byte of
                 * IOMMU page.
                 */
                s = iotlb.addr_mask - offset;
                /*
                 * Size of used ring, or of the part of it until end
                 * of IOMMU page. To avoid zero result, do the adding
                 * outside of MIN().
                 */
                s = MIN(s, used_size - 1) + 1;

                vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
                                      range_get_last(phys, s));
                used_size -= s;
                used_phys += s;
            }
        } else {
            vhost_dev_sync_region(dev, section, start_addr,
                                  end_addr, vq->used_phys,
                                  range_get_last(vq->used_phys, vq->used_size));
        }
    }
    return 0;
}
@@ -306,24 +368,6 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
    dev->log_size = size;
}

static bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    /*
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
     * incremental memory mapping API via IOTLB API. For platform that
     * does not have IOMMU, there's no need to enable this feature
     * which may cause unnecessary IOTLB miss/update transactions.
     */
    if (vdev) {
        return virtio_bus_device_iommu_enabled(vdev) &&
            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
    } else {
        return false;
    }
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, bool is_write)
{
@ -0,0 +1,204 @@
|
|||
/*
|
||||
* Virtio Support
|
||||
*
|
||||
* Copyright IBM, Corp. 2007
|
||||
*
|
||||
* Authors:
|
||||
* Anthony Liguori <aliguori@us.ibm.com>
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "hw/virtio/virtio.h"
|
||||
#include "cpu.h"
|
||||
|
||||
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldub_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = lduw_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldl_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stb_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stw_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stl_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldub_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = lduw_le_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldl_le_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
void virtio_config_modern_writeb(VirtIODevice *vdev,
|
||||
uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stb_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_modern_writew(VirtIODevice *vdev,
|
||||
uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stw_le_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_modern_writel(VirtIODevice *vdev,
|
||||
uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stl_le_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
#include "exec/memop.h"
|
||||
#include "standard-headers/linux/virtio_pci.h"
|
||||
#include "standard-headers/linux/virtio_ids.h"
|
||||
#include "hw/boards.h"
|
||||
#include "hw/virtio/virtio.h"
|
||||
#include "migration/qemu-file-types.h"
|
||||
|
@ -224,6 +225,90 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
|
|||
return 0;
|
||||
}
|
||||
|
||||
typedef struct VirtIOPCIIDInfo {
|
||||
/* virtio id */
|
||||
uint16_t vdev_id;
|
||||
/* pci device id for the transitional device */
|
||||
uint16_t trans_devid;
|
||||
uint16_t class_id;
|
||||
} VirtIOPCIIDInfo;
|
||||
|
||||
static const VirtIOPCIIDInfo virtio_pci_id_info[] = {
|
||||
{
|
||||
.vdev_id = VIRTIO_ID_CRYPTO,
|
||||
.class_id = PCI_CLASS_OTHERS,
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_FS,
|
||||
.class_id = PCI_CLASS_STORAGE_OTHER,
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_NET,
|
||||
.trans_devid = PCI_DEVICE_ID_VIRTIO_NET,
|
||||
.class_id = PCI_CLASS_NETWORK_ETHERNET,
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_BLOCK,
|
||||
.trans_devid = PCI_DEVICE_ID_VIRTIO_BLOCK,
|
||||
.class_id = PCI_CLASS_STORAGE_SCSI,
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_CONSOLE,
|
||||
.trans_devid = PCI_DEVICE_ID_VIRTIO_CONSOLE,
|
||||
.class_id = PCI_CLASS_COMMUNICATION_OTHER,
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_SCSI,
|
||||
.trans_devid = PCI_DEVICE_ID_VIRTIO_SCSI,
|
||||
.class_id = PCI_CLASS_STORAGE_SCSI
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_9P,
|
||||
.trans_devid = PCI_DEVICE_ID_VIRTIO_9P,
|
||||
.class_id = PCI_BASE_CLASS_NETWORK,
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_BALLOON,
|
||||
.trans_devid = PCI_DEVICE_ID_VIRTIO_BALLOON,
|
||||
.class_id = PCI_CLASS_OTHERS,
|
||||
}, {
|
||||
.vdev_id = VIRTIO_ID_RNG,
|
||||
.trans_devid = PCI_DEVICE_ID_VIRTIO_RNG,
|
||||
.class_id = PCI_CLASS_OTHERS,
|
||||
},
|
||||
};
|
||||
|
||||
static const VirtIOPCIIDInfo *virtio_pci_get_id_info(uint16_t vdev_id)
|
||||
{
|
||||
const VirtIOPCIIDInfo *info = NULL;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(virtio_pci_id_info); i++) {
|
||||
if (virtio_pci_id_info[i].vdev_id == vdev_id) {
|
||||
info = &virtio_pci_id_info[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!info) {
|
||||
/* The device id is invalid or not added to the id_info yet. */
|
||||
error_report("Invalid virtio device(id %u)", vdev_id);
|
||||
abort();
|
||||
}
|
||||
|
||||
return info;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the Transitional Device ID for the specific device, return
|
||||
* zero if the device is non-transitional.
|
||||
*/
|
||||
uint16_t virtio_pci_get_trans_devid(uint16_t device_id)
|
||||
{
|
||||
return virtio_pci_get_id_info(device_id)->trans_devid;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the Class ID for the specific device.
|
||||
*/
|
||||
uint16_t virtio_pci_get_class_id(uint16_t device_id)
|
||||
{
|
||||
return virtio_pci_get_id_info(device_id)->class_id;
|
||||
}
|
||||
|
||||
static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
|
||||
|
@ -1729,6 +1814,9 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
|
|||
* is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
|
||||
*/
|
||||
pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
|
||||
if (proxy->trans_devid) {
|
||||
pci_config_set_device_id(config, proxy->trans_devid);
|
||||
}
|
||||
} else {
|
||||
/* pure virtio-1.0 */
|
||||
pci_set_word(config + PCI_VENDOR_ID,
|
||||
|
|
|
@ -0,0 +1,659 @@
|
|||
/*
|
||||
* Virtio QMP helpers
|
||||
*
|
||||
* Copyright IBM, Corp. 2007
|
||||
*
|
||||
* Authors:
|
||||
* Anthony Liguori <aliguori@us.ibm.com>
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "hw/virtio/virtio.h"
|
||||
#include "virtio-qmp.h"
|
||||
|
||||
#include "standard-headers/linux/virtio_ids.h"
|
||||
#include "standard-headers/linux/vhost_types.h"
|
||||
#include "standard-headers/linux/virtio_blk.h"
|
||||
#include "standard-headers/linux/virtio_console.h"
|
||||
#include "standard-headers/linux/virtio_gpu.h"
|
||||
#include "standard-headers/linux/virtio_net.h"
|
||||
#include "standard-headers/linux/virtio_scsi.h"
|
||||
#include "standard-headers/linux/virtio_i2c.h"
|
||||
#include "standard-headers/linux/virtio_balloon.h"
|
||||
#include "standard-headers/linux/virtio_iommu.h"
|
||||
#include "standard-headers/linux/virtio_mem.h"
|
||||
#include "standard-headers/linux/virtio_vsock.h"
|
||||
|
||||
#include CONFIG_DEVICES
|
||||
|
||||
#define FEATURE_ENTRY(name, desc) (qmp_virtio_feature_map_t) \
|
||||
{ .virtio_bit = name, .feature_desc = desc }
|
||||
|
||||
enum VhostUserProtocolFeature {
|
||||
VHOST_USER_PROTOCOL_F_MQ = 0,
|
||||
VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
|
||||
VHOST_USER_PROTOCOL_F_RARP = 2,
|
||||
VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
|
||||
VHOST_USER_PROTOCOL_F_NET_MTU = 4,
|
||||
VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
|
||||
VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
|
||||
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
|
||||
VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
|
||||
VHOST_USER_PROTOCOL_F_CONFIG = 9,
|
||||
VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
|
||||
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
|
||||
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
|
||||
VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
|
||||
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
|
||||
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
|
||||
VHOST_USER_PROTOCOL_F_MAX
|
||||
};
|
||||
|
||||
/* Virtio transport features mapping */
|
||||
static const qmp_virtio_feature_map_t virtio_transport_map[] = {
|
||||
/* Virtio device transport features */
|
||||
#ifndef VIRTIO_CONFIG_NO_LEGACY
|
||||
FEATURE_ENTRY(VIRTIO_F_NOTIFY_ON_EMPTY, \
|
||||
"VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. "
|
||||
"descs. on VQ"),
|
||||
FEATURE_ENTRY(VIRTIO_F_ANY_LAYOUT, \
|
||||
"VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts"),
|
||||
#endif /* !VIRTIO_CONFIG_NO_LEGACY */
|
||||
FEATURE_ENTRY(VIRTIO_F_VERSION_1, \
|
||||
"VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)"),
|
||||
FEATURE_ENTRY(VIRTIO_F_IOMMU_PLATFORM, \
|
||||
"VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform"),
|
||||
FEATURE_ENTRY(VIRTIO_F_RING_PACKED, \
|
||||
"VIRTIO_F_RING_PACKED: Device supports packed VQ layout"),
|
||||
FEATURE_ENTRY(VIRTIO_F_IN_ORDER, \
|
||||
"VIRTIO_F_IN_ORDER: Device uses buffers in same order as made "
|
||||
"available by driver"),
|
||||
FEATURE_ENTRY(VIRTIO_F_ORDER_PLATFORM, \
|
||||
"VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"),
|
||||
FEATURE_ENTRY(VIRTIO_F_SR_IOV, \
|
||||
"VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"),
|
||||
/* Virtio ring transport features */
|
||||
FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \
|
||||
"VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"),
|
||||
FEATURE_ENTRY(VIRTIO_RING_F_EVENT_IDX, \
|
||||
"VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* Vhost-user protocol features mapping */
|
||||
static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_MQ, \
|
||||
"VHOST_USER_PROTOCOL_F_MQ: Multiqueue protocol supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_LOG_SHMFD, \
|
||||
"VHOST_USER_PROTOCOL_F_LOG_SHMFD: Shared log memory fd supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RARP, \
|
||||
"VHOST_USER_PROTOCOL_F_RARP: Vhost-user back-end RARP broadcasting "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_REPLY_ACK, \
|
||||
"VHOST_USER_PROTOCOL_F_REPLY_ACK: Requested operation status ack. "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \
|
||||
"VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \
|
||||
"VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated "
|
||||
"requests supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \
|
||||
"VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy "
|
||||
"devices supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CRYPTO_SESSION, \
|
||||
"VHOST_USER_PROTOCOL_F_CRYPTO_SESSION: Session creation for crypto "
|
||||
"operations supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_PAGEFAULT, \
|
||||
"VHOST_USER_PROTOCOL_F_PAGEFAULT: Request servicing on userfaultfd "
|
||||
"for accessed pages supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \
|
||||
"VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio "
|
||||
"device configuration space supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \
|
||||
"VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication "
|
||||
"channel supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \
|
||||
"VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified "
|
||||
"VQs supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD, \
|
||||
"VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: Shared inflight I/O buffers "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RESET_DEVICE, \
|
||||
"VHOST_USER_PROTOCOL_F_RESET_DEVICE: Disabling all rings and "
|
||||
"resetting internal device state supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS, \
|
||||
"VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS: In-band messaging "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, \
|
||||
"VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS: Configuration for "
|
||||
"memory slots supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio device configuration statuses */
|
||||
static const qmp_virtio_feature_map_t virtio_config_status_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER_OK, \
|
||||
"VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_FEATURES_OK, \
|
||||
"VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER, \
|
||||
"VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_NEEDS_RESET, \
|
||||
"VIRTIO_CONFIG_S_NEEDS_RESET: Irrecoverable error, device needs "
|
||||
"reset"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_FAILED, \
|
||||
"VIRTIO_CONFIG_S_FAILED: Error in guest, device failed"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_ACKNOWLEDGE, \
|
||||
"VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-blk features mapping */
|
||||
#ifdef CONFIG_VIRTIO_BLK
|
||||
static const qmp_virtio_feature_map_t virtio_blk_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_SIZE_MAX, \
|
||||
"VIRTIO_BLK_F_SIZE_MAX: Max segment size is size_max"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_SEG_MAX, \
|
||||
"VIRTIO_BLK_F_SEG_MAX: Max segments in a request is seg_max"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_GEOMETRY, \
|
||||
"VIRTIO_BLK_F_GEOMETRY: Legacy geometry available"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_RO, \
|
||||
"VIRTIO_BLK_F_RO: Device is read-only"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_BLK_SIZE, \
|
||||
"VIRTIO_BLK_F_BLK_SIZE: Block size of disk available"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_TOPOLOGY, \
|
||||
"VIRTIO_BLK_F_TOPOLOGY: Topology information available"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_MQ, \
|
||||
"VIRTIO_BLK_F_MQ: Multiqueue supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_DISCARD, \
|
||||
"VIRTIO_BLK_F_DISCARD: Discard command supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \
|
||||
"VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"),
|
||||
#ifndef VIRTIO_BLK_NO_LEGACY
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \
|
||||
"VIRTIO_BLK_F_BARRIER: Request barriers supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_SCSI, \
|
||||
"VIRTIO_BLK_F_SCSI: SCSI packet commands supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_FLUSH, \
|
||||
"VIRTIO_BLK_F_FLUSH: Flush command supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_CONFIG_WCE, \
|
||||
"VIRTIO_BLK_F_CONFIG_WCE: Cache writeback and writethrough modes "
|
||||
"supported"),
|
||||
#endif /* !VIRTIO_BLK_NO_LEGACY */
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-serial features mapping */
|
||||
#ifdef CONFIG_VIRTIO_SERIAL
|
||||
static const qmp_virtio_feature_map_t virtio_serial_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_CONSOLE_F_SIZE, \
|
||||
"VIRTIO_CONSOLE_F_SIZE: Host providing console size"),
|
||||
FEATURE_ENTRY(VIRTIO_CONSOLE_F_MULTIPORT, \
|
||||
"VIRTIO_CONSOLE_F_MULTIPORT: Multiple ports for device supported"),
|
||||
FEATURE_ENTRY(VIRTIO_CONSOLE_F_EMERG_WRITE, \
|
||||
"VIRTIO_CONSOLE_F_EMERG_WRITE: Emergency write supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-gpu features mapping */
|
||||
#ifdef CONFIG_VIRTIO_GPU
|
||||
static const qmp_virtio_feature_map_t virtio_gpu_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_VIRGL, \
|
||||
"VIRTIO_GPU_F_VIRGL: Virgl 3D mode supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_EDID, \
|
||||
"VIRTIO_GPU_F_EDID: EDID metadata supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_UUID, \
|
||||
"VIRTIO_GPU_F_RESOURCE_UUID: Resource UUID assigning supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_BLOB, \
|
||||
"VIRTIO_GPU_F_RESOURCE_BLOB: Size-based blob resources supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_CONTEXT_INIT, \
|
||||
"VIRTIO_GPU_F_CONTEXT_INIT: Context types and synchronization "
|
||||
"timelines supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-input features mapping */
|
||||
#ifdef CONFIG_VIRTIO_INPUT
|
||||
static const qmp_virtio_feature_map_t virtio_input_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-net features mapping */
|
||||
#ifdef CONFIG_VIRTIO_NET
|
||||
static const qmp_virtio_feature_map_t virtio_net_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CSUM, \
|
||||
"VIRTIO_NET_F_CSUM: Device handling packets with partial checksum "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_CSUM, \
|
||||
"VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial "
|
||||
"checksum supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
|
||||
"VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading "
|
||||
"reconfig. supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MTU, \
|
||||
"VIRTIO_NET_F_MTU: Device max MTU reporting supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MAC, \
|
||||
"VIRTIO_NET_F_MAC: Device has given MAC address"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO4, \
|
||||
"VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO6, \
|
||||
"VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ECN, \
|
||||
"VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UFO, \
|
||||
"VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO4, \
|
||||
"VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO6, \
|
||||
"VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_ECN, \
|
||||
"VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_UFO, \
|
||||
"VIRTIO_NET_F_HOST_UFO: Device can receive UFO"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MRG_RXBUF, \
|
||||
"VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_STATUS, \
|
||||
"VIRTIO_NET_F_STATUS: Configuration status field available"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VQ, \
|
||||
"VIRTIO_NET_F_CTRL_VQ: Control channel available"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX, \
|
||||
"VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VLAN, \
|
||||
"VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX_EXTRA, \
|
||||
"VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ANNOUNCE, \
|
||||
"VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MQ, \
|
||||
"VIRTIO_NET_F_MQ: Multiqueue with automatic receive steering "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_MAC_ADDR, \
|
||||
"VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control "
|
||||
"channel"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HASH_REPORT, \
|
||||
"VIRTIO_NET_F_HASH_REPORT: Hash reporting supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_RSS, \
|
||||
"VIRTIO_NET_F_RSS: RSS RX steering supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_RSC_EXT, \
|
||||
"VIRTIO_NET_F_RSC_EXT: Extended coalescing info supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_STANDBY, \
|
||||
"VIRTIO_NET_F_STANDBY: Device acting as standby for primary "
|
||||
"device with same MAC addr. supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_SPEED_DUPLEX, \
|
||||
"VIRTIO_NET_F_SPEED_DUPLEX: Device set linkspeed and duplex"),
|
||||
#ifndef VIRTIO_NET_NO_LEGACY
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GSO, \
|
||||
"VIRTIO_NET_F_GSO: Handling GSO-type packets supported"),
|
||||
#endif /* !VIRTIO_NET_NO_LEGACY */
|
||||
FEATURE_ENTRY(VHOST_NET_F_VIRTIO_NET_HDR, \
|
||||
"VHOST_NET_F_VIRTIO_NET_HDR: Virtio-net headers for RX and TX "
|
||||
"packets supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-scsi features mapping */
|
||||
#ifdef CONFIG_VIRTIO_SCSI
|
||||
static const qmp_virtio_feature_map_t virtio_scsi_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_INOUT, \
|
||||
"VIRTIO_SCSI_F_INOUT: Requests including read and writable data "
|
||||
"buffers suppoted"),
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_HOTPLUG, \
|
||||
"VIRTIO_SCSI_F_HOTPLUG: Reporting and handling hot-plug events "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_CHANGE, \
|
||||
"VIRTIO_SCSI_F_CHANGE: Reporting and handling LUN changes "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_T10_PI, \
|
||||
"VIRTIO_SCSI_F_T10_PI: T10 info included in request header"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio/vhost-user-fs features mapping */
|
||||
#ifdef CONFIG_VHOST_USER_FS
|
||||
static const qmp_virtio_feature_map_t virtio_fs_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio/vhost-user-i2c features mapping */
|
||||
#ifdef CONFIG_VIRTIO_I2C_ADAPTER
|
||||
static const qmp_virtio_feature_map_t virtio_i2c_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, \
|
||||
"VIRTIO_I2C_F_ZERO_LEGNTH_REQUEST: Zero length requests supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio/vhost-vsock features mapping */
|
||||
#ifdef CONFIG_VHOST_VSOCK
|
||||
static const qmp_virtio_feature_map_t virtio_vsock_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_VSOCK_F_SEQPACKET, \
|
||||
"VIRTIO_VSOCK_F_SEQPACKET: SOCK_SEQPACKET supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-balloon features mapping */
|
||||
#ifdef CONFIG_VIRTIO_BALLOON
|
||||
static const qmp_virtio_feature_map_t virtio_balloon_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_MUST_TELL_HOST, \
|
||||
"VIRTIO_BALLOON_F_MUST_TELL_HOST: Tell host before reclaiming "
|
||||
"pages"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_STATS_VQ, \
|
||||
"VIRTIO_BALLOON_F_STATS_VQ: Guest memory stats VQ available"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_DEFLATE_ON_OOM, \
|
||||
"VIRTIO_BALLOON_F_DEFLATE_ON_OOM: Deflate balloon when guest OOM"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_FREE_PAGE_HINT, \
|
||||
"VIRTIO_BALLOON_F_FREE_PAGE_HINT: VQ reporting free pages enabled"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_PAGE_POISON, \
|
||||
"VIRTIO_BALLOON_F_PAGE_POISON: Guest page poisoning enabled"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_REPORTING, \
|
||||
"VIRTIO_BALLOON_F_REPORTING: Page reporting VQ enabled"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-crypto features mapping */
|
||||
#ifdef CONFIG_VIRTIO_CRYPTO
|
||||
static const qmp_virtio_feature_map_t virtio_crypto_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-iommu features mapping */
|
||||
#ifdef CONFIG_VIRTIO_IOMMU
|
||||
static const qmp_virtio_feature_map_t virtio_iommu_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_INPUT_RANGE, \
|
||||
"VIRTIO_IOMMU_F_INPUT_RANGE: Range of available virtual addrs. "
|
||||
"available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_DOMAIN_RANGE, \
|
||||
"VIRTIO_IOMMU_F_DOMAIN_RANGE: Number of supported domains "
|
||||
"available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_MAP_UNMAP, \
|
||||
"VIRTIO_IOMMU_F_MAP_UNMAP: Map and unmap requests available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS, \
|
||||
"VIRTIO_IOMMU_F_BYPASS: Endpoints not attached to domains are in "
|
||||
"bypass mode"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_PROBE, \
|
||||
"VIRTIO_IOMMU_F_PROBE: Probe requests available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_MMIO, \
|
||||
"VIRTIO_IOMMU_F_MMIO: VIRTIO_IOMMU_MAP_F_MMIO flag available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS_CONFIG, \
|
||||
"VIRTIO_IOMMU_F_BYPASS_CONFIG: Bypass field of IOMMU config "
|
||||
"available"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-mem features mapping */
|
||||
#ifdef CONFIG_VIRTIO_MEM
|
||||
static const qmp_virtio_feature_map_t virtio_mem_feature_map[] = {
|
||||
#ifndef CONFIG_ACPI
|
||||
FEATURE_ENTRY(VIRTIO_MEM_F_ACPI_PXM, \
|
||||
"VIRTIO_MEM_F_ACPI_PXM: node_id is an ACPI PXM and is valid"),
|
||||
#endif /* !CONFIG_ACPI */
|
||||
FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \
|
||||
"VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be "
|
||||
"accessed"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
/* virtio-rng features mapping */
|
||||
#ifdef CONFIG_VIRTIO_RNG
|
||||
static const qmp_virtio_feature_map_t virtio_rng_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
#endif
|
||||
|
||||
#define CONVERT_FEATURES(type, map, is_status, bitmap)          \
    ({                                                          \
        type *list = NULL;                                      \
        type *node;                                             \
        for (i = 0; map[i].virtio_bit != -1; i++) {             \
            if (is_status) {                                    \
                bit = map[i].virtio_bit;                        \
            }                                                   \
            else {                                              \
                bit = 1ULL << map[i].virtio_bit;                \
            }                                                   \
            if ((bitmap & bit) == 0) {                          \
                continue;                                       \
            }                                                   \
            node = g_new0(type, 1);                             \
            node->value = g_strdup(map[i].feature_desc);        \
            node->next = list;                                  \
            list = node;                                        \
            bitmap ^= bit;                                      \
        }                                                       \
        list;                                                   \
    })
|
||||
|
||||
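Not part of the patch: a minimal standalone sketch of the decode pattern used by CONVERT_FEATURES above, with hypothetical stand-in types instead of the QAPI strList and the glib allocators. Every bit the table knows is turned into a list node and cleared, so whatever remains in the bitmap afterwards is exactly the set of unknown bits that the callers below report.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the QAPI strList and qmp_virtio_feature_map_t */
struct str_node { const char *value; struct str_node *next; };
struct feat_map { int bit; const char *desc; };

static struct str_node nodes[64];               /* static pool, no g_new0() */

/* build a list for every set bit that @map knows; clear each decoded bit */
static struct str_node *decode(const struct feat_map *map, uint64_t *bitmap)
{
    struct str_node *list = NULL;
    int n = 0;

    for (int i = 0; map[i].bit != -1; i++) {
        uint64_t bit = 1ULL << map[i].bit;
        if (!(*bitmap & bit)) {
            continue;
        }
        nodes[n].value = map[i].desc;           /* prepend, like the macro */
        nodes[n].next = list;
        list = &nodes[n++];
        *bitmap ^= bit;                         /* leftovers == unknown bits */
    }
    return list;
}

int main(void)
{
    static const struct feat_map map[] = {
        { 0, "bit 0 known" }, { 2, "bit 2 known" }, { -1, "" }
    };
    uint64_t features = 0x7;                    /* bits 0, 1 and 2 set */

    for (struct str_node *n = decode(map, &features); n; n = n->next) {
        printf("%s\n", n->value);
    }
    printf("unknown bits: 0x%llx\n", (unsigned long long)features); /* 0x2 */
    return 0;
}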
VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap)
|
||||
{
|
||||
VirtioDeviceStatus *status;
|
||||
uint8_t bit;
|
||||
int i;
|
||||
|
||||
status = g_new0(VirtioDeviceStatus, 1);
|
||||
status->statuses = CONVERT_FEATURES(strList, virtio_config_status_map,
|
||||
1, bitmap);
|
||||
status->has_unknown_statuses = bitmap != 0;
|
||||
if (status->has_unknown_statuses) {
|
||||
status->unknown_statuses = bitmap;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap)
|
||||
{
|
||||
VhostDeviceProtocols *vhu_protocols;
|
||||
uint64_t bit;
|
||||
int i;
|
||||
|
||||
vhu_protocols = g_new0(VhostDeviceProtocols, 1);
|
||||
vhu_protocols->protocols =
|
||||
CONVERT_FEATURES(strList,
|
||||
vhost_user_protocol_map, 0, bitmap);
|
||||
vhu_protocols->has_unknown_protocols = bitmap != 0;
|
||||
if (vhu_protocols->has_unknown_protocols) {
|
||||
vhu_protocols->unknown_protocols = bitmap;
|
||||
}
|
||||
|
||||
return vhu_protocols;
|
||||
}
|
||||
|
||||
VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, uint64_t bitmap)
|
||||
{
|
||||
VirtioDeviceFeatures *features;
|
||||
uint64_t bit;
|
||||
int i;
|
||||
|
||||
features = g_new0(VirtioDeviceFeatures, 1);
|
||||
features->has_dev_features = true;
|
||||
|
||||
/* transport features */
|
||||
features->transports = CONVERT_FEATURES(strList, virtio_transport_map, 0,
|
||||
bitmap);
|
||||
|
||||
/* device features */
|
||||
switch (device_id) {
|
||||
#ifdef CONFIG_VIRTIO_SERIAL
|
||||
case VIRTIO_ID_CONSOLE:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_serial_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_BLK
|
||||
case VIRTIO_ID_BLOCK:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_blk_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_GPU
|
||||
case VIRTIO_ID_GPU:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_gpu_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_NET
|
||||
case VIRTIO_ID_NET:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_net_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_SCSI
|
||||
case VIRTIO_ID_SCSI:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_scsi_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_BALLOON
|
||||
case VIRTIO_ID_BALLOON:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_balloon_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_IOMMU
|
||||
case VIRTIO_ID_IOMMU:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_iommu_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_INPUT
|
||||
case VIRTIO_ID_INPUT:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_input_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VHOST_USER_FS
|
||||
case VIRTIO_ID_FS:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_fs_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VHOST_VSOCK
|
||||
case VIRTIO_ID_VSOCK:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_vsock_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_CRYPTO
|
||||
case VIRTIO_ID_CRYPTO:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_crypto_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_MEM
|
||||
case VIRTIO_ID_MEM:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_mem_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_I2C_ADAPTER
|
||||
case VIRTIO_ID_I2C_ADAPTER:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_i2c_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_RNG
|
||||
case VIRTIO_ID_RNG:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_rng_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
/* No features */
|
||||
case VIRTIO_ID_9P:
|
||||
case VIRTIO_ID_PMEM:
|
||||
case VIRTIO_ID_IOMEM:
|
||||
case VIRTIO_ID_RPMSG:
|
||||
case VIRTIO_ID_CLOCK:
|
||||
case VIRTIO_ID_MAC80211_WLAN:
|
||||
case VIRTIO_ID_MAC80211_HWSIM:
|
||||
case VIRTIO_ID_RPROC_SERIAL:
|
||||
case VIRTIO_ID_MEMORY_BALLOON:
|
||||
case VIRTIO_ID_CAIF:
|
||||
case VIRTIO_ID_SIGNAL_DIST:
|
||||
case VIRTIO_ID_PSTORE:
|
||||
case VIRTIO_ID_SOUND:
|
||||
case VIRTIO_ID_BT:
|
||||
case VIRTIO_ID_RPMB:
|
||||
case VIRTIO_ID_VIDEO_ENCODER:
|
||||
case VIRTIO_ID_VIDEO_DECODER:
|
||||
case VIRTIO_ID_SCMI:
|
||||
case VIRTIO_ID_NITRO_SEC_MOD:
|
||||
case VIRTIO_ID_WATCHDOG:
|
||||
case VIRTIO_ID_CAN:
|
||||
case VIRTIO_ID_DMABUF:
|
||||
case VIRTIO_ID_PARAM_SERV:
|
||||
case VIRTIO_ID_AUDIO_POLICY:
|
||||
case VIRTIO_ID_GPIO:
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
features->has_unknown_dev_features = bitmap != 0;
|
||||
if (features->has_unknown_dev_features) {
|
||||
features->unknown_dev_features = bitmap;
|
||||
}
|
||||
|
||||
return features;
|
||||
}
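For illustration only, a hypothetical caller could decode the negotiated feature bits of a realized virtio-net device along these lines; the vdev variable and the use of qemu_printf() are assumptions, not something this patch adds (transport-level bits land in f->transports rather than f->dev_features):

VirtioDeviceFeatures *f = qmp_decode_features(VIRTIO_ID_NET,
                                              vdev->guest_features);
for (strList *l = f->dev_features; l; l = l->next) {
    qemu_printf("%s\n", l->value);         /* one description per known bit */
}
if (f->has_unknown_dev_features) {
    qemu_printf("unknown bits: 0x%" PRIx64 "\n", f->unknown_dev_features);
}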
|
|
@ -0,0 +1,20 @@
|
|||
/*
 * Virtio QMP helpers
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef HW_VIRTIO_QMP_H
#define HW_VIRTIO_QMP_H

#include "qapi/qapi-types-virtio.h"

VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap);
VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap);
VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, uint64_t bitmap);

#endif
|
|
@ -16,15 +16,14 @@
|
|||
#include "qapi/qmp/qdict.h"
|
||||
#include "qapi/qapi-commands-virtio.h"
|
||||
#include "qapi/qapi-commands-qom.h"
|
||||
#include "qapi/qapi-visit-virtio.h"
|
||||
#include "qapi/qmp/qjson.h"
|
||||
#include "cpu.h"
|
||||
#include "trace.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/log.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qom/object_interfaces.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "hw/virtio/virtio.h"
|
||||
#include "migration/qemu-file-types.h"
|
||||
#include "qemu/atomic.h"
|
||||
|
@ -33,6 +32,8 @@
|
|||
#include "hw/virtio/virtio-access.h"
|
||||
#include "sysemu/dma.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "virtio-qmp.h"
|
||||
|
||||
#include "standard-headers/linux/virtio_ids.h"
|
||||
#include "standard-headers/linux/vhost_types.h"
|
||||
#include "standard-headers/linux/virtio_blk.h"
|
||||
|
@ -45,7 +46,6 @@
|
|||
#include "standard-headers/linux/virtio_iommu.h"
|
||||
#include "standard-headers/linux/virtio_mem.h"
|
||||
#include "standard-headers/linux/virtio_vsock.h"
|
||||
#include CONFIG_DEVICES
|
||||
|
||||
/* QAPI list of realized VirtIODevices */
|
||||
static QTAILQ_HEAD(, VirtIODevice) virtio_list;
|
||||
|
@ -55,412 +55,6 @@ static QTAILQ_HEAD(, VirtIODevice) virtio_list;
|
|||
*/
|
||||
#define VHOST_USER_MAX_CONFIG_SIZE 256
|
||||
|
||||
#define FEATURE_ENTRY(name, desc) (qmp_virtio_feature_map_t) \
|
||||
{ .virtio_bit = name, .feature_desc = desc }
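For reference, each FEATURE_ENTRY() used in the tables below is just a compound literal of this struct; an illustrative expansion:

/* FEATURE_ENTRY(VIRTIO_F_VERSION_1, "VIRTIO_F_VERSION_1: ...") becomes: */
(qmp_virtio_feature_map_t) {
    .virtio_bit = VIRTIO_F_VERSION_1,
    .feature_desc = "VIRTIO_F_VERSION_1: ..."
}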
|
||||
|
||||
enum VhostUserProtocolFeature {
|
||||
VHOST_USER_PROTOCOL_F_MQ = 0,
|
||||
VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
|
||||
VHOST_USER_PROTOCOL_F_RARP = 2,
|
||||
VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
|
||||
VHOST_USER_PROTOCOL_F_NET_MTU = 4,
|
||||
VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
|
||||
VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
|
||||
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
|
||||
VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
|
||||
VHOST_USER_PROTOCOL_F_CONFIG = 9,
|
||||
VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
|
||||
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
|
||||
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
|
||||
VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
|
||||
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
|
||||
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
|
||||
VHOST_USER_PROTOCOL_F_MAX
|
||||
};
|
||||
|
||||
/* Virtio transport features mapping */
|
||||
static qmp_virtio_feature_map_t virtio_transport_map[] = {
|
||||
/* Virtio device transport features */
|
||||
#ifndef VIRTIO_CONFIG_NO_LEGACY
|
||||
FEATURE_ENTRY(VIRTIO_F_NOTIFY_ON_EMPTY, \
|
||||
"VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. "
|
||||
"descs. on VQ"),
|
||||
FEATURE_ENTRY(VIRTIO_F_ANY_LAYOUT, \
|
||||
"VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts"),
|
||||
#endif /* !VIRTIO_CONFIG_NO_LEGACY */
|
||||
FEATURE_ENTRY(VIRTIO_F_VERSION_1, \
|
||||
"VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)"),
|
||||
FEATURE_ENTRY(VIRTIO_F_IOMMU_PLATFORM, \
|
||||
"VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform"),
|
||||
FEATURE_ENTRY(VIRTIO_F_RING_PACKED, \
|
||||
"VIRTIO_F_RING_PACKED: Device supports packed VQ layout"),
|
||||
FEATURE_ENTRY(VIRTIO_F_IN_ORDER, \
|
||||
"VIRTIO_F_IN_ORDER: Device uses buffers in same order as made "
|
||||
"available by driver"),
|
||||
FEATURE_ENTRY(VIRTIO_F_ORDER_PLATFORM, \
|
||||
"VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"),
|
||||
FEATURE_ENTRY(VIRTIO_F_SR_IOV, \
|
||||
"VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"),
|
||||
/* Virtio ring transport features */
|
||||
FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \
|
||||
"VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"),
|
||||
FEATURE_ENTRY(VIRTIO_RING_F_EVENT_IDX, \
|
||||
"VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* Vhost-user protocol features mapping */
|
||||
static qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_MQ, \
|
||||
"VHOST_USER_PROTOCOL_F_MQ: Multiqueue protocol supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_LOG_SHMFD, \
|
||||
"VHOST_USER_PROTOCOL_F_LOG_SHMFD: Shared log memory fd supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RARP, \
|
||||
"VHOST_USER_PROTOCOL_F_RARP: Vhost-user back-end RARP broadcasting "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_REPLY_ACK, \
|
||||
"VHOST_USER_PROTOCOL_F_REPLY_ACK: Requested operation status ack. "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \
|
||||
"VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \
|
||||
"VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated "
|
||||
"requests supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \
|
||||
"VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy "
|
||||
"devices supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CRYPTO_SESSION, \
|
||||
"VHOST_USER_PROTOCOL_F_CRYPTO_SESSION: Session creation for crypto "
|
||||
"operations supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_PAGEFAULT, \
|
||||
"VHOST_USER_PROTOCOL_F_PAGEFAULT: Request servicing on userfaultfd "
|
||||
"for accessed pages supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \
|
||||
"VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio "
|
||||
"device configuration space supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \
|
||||
"VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication "
|
||||
"channel supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \
|
||||
"VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified "
|
||||
"VQs supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD, \
|
||||
"VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: Shared inflight I/O buffers "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RESET_DEVICE, \
|
||||
"VHOST_USER_PROTOCOL_F_RESET_DEVICE: Disabling all rings and "
|
||||
"resetting internal device state supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS, \
|
||||
"VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS: In-band messaging "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, \
|
||||
"VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS: Configuration for "
|
||||
"memory slots supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio device configuration statuses */
|
||||
static qmp_virtio_feature_map_t virtio_config_status_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER_OK, \
|
||||
"VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_FEATURES_OK, \
|
||||
"VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER, \
|
||||
"VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_NEEDS_RESET, \
|
||||
"VIRTIO_CONFIG_S_NEEDS_RESET: Irrecoverable error, device needs "
|
||||
"reset"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_FAILED, \
|
||||
"VIRTIO_CONFIG_S_FAILED: Error in guest, device failed"),
|
||||
FEATURE_ENTRY(VIRTIO_CONFIG_S_ACKNOWLEDGE, \
|
||||
"VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-blk features mapping */
|
||||
qmp_virtio_feature_map_t virtio_blk_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_SIZE_MAX, \
|
||||
"VIRTIO_BLK_F_SIZE_MAX: Max segment size is size_max"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_SEG_MAX, \
|
||||
"VIRTIO_BLK_F_SEG_MAX: Max segments in a request is seg_max"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_GEOMETRY, \
|
||||
"VIRTIO_BLK_F_GEOMETRY: Legacy geometry available"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_RO, \
|
||||
"VIRTIO_BLK_F_RO: Device is read-only"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_BLK_SIZE, \
|
||||
"VIRTIO_BLK_F_BLK_SIZE: Block size of disk available"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_TOPOLOGY, \
|
||||
"VIRTIO_BLK_F_TOPOLOGY: Topology information available"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_MQ, \
|
||||
"VIRTIO_BLK_F_MQ: Multiqueue supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_DISCARD, \
|
||||
"VIRTIO_BLK_F_DISCARD: Discard command supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \
|
||||
"VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"),
|
||||
#ifndef VIRTIO_BLK_NO_LEGACY
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \
|
||||
"VIRTIO_BLK_F_BARRIER: Request barriers supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_SCSI, \
|
||||
"VIRTIO_BLK_F_SCSI: SCSI packet commands supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_FLUSH, \
|
||||
"VIRTIO_BLK_F_FLUSH: Flush command supported"),
|
||||
FEATURE_ENTRY(VIRTIO_BLK_F_CONFIG_WCE, \
|
||||
"VIRTIO_BLK_F_CONFIG_WCE: Cache writeback and writethrough modes "
|
||||
"supported"),
|
||||
#endif /* !VIRTIO_BLK_NO_LEGACY */
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-serial features mapping */
|
||||
qmp_virtio_feature_map_t virtio_serial_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_CONSOLE_F_SIZE, \
|
||||
"VIRTIO_CONSOLE_F_SIZE: Host providing console size"),
|
||||
FEATURE_ENTRY(VIRTIO_CONSOLE_F_MULTIPORT, \
|
||||
"VIRTIO_CONSOLE_F_MULTIPORT: Multiple ports for device supported"),
|
||||
FEATURE_ENTRY(VIRTIO_CONSOLE_F_EMERG_WRITE, \
|
||||
"VIRTIO_CONSOLE_F_EMERG_WRITE: Emergency write supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-gpu features mapping */
|
||||
qmp_virtio_feature_map_t virtio_gpu_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_VIRGL, \
|
||||
"VIRTIO_GPU_F_VIRGL: Virgl 3D mode supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_EDID, \
|
||||
"VIRTIO_GPU_F_EDID: EDID metadata supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_UUID, \
|
||||
"VIRTIO_GPU_F_RESOURCE_UUID: Resource UUID assigning supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_BLOB, \
|
||||
"VIRTIO_GPU_F_RESOURCE_BLOB: Size-based blob resources supported"),
|
||||
FEATURE_ENTRY(VIRTIO_GPU_F_CONTEXT_INIT, \
|
||||
"VIRTIO_GPU_F_CONTEXT_INIT: Context types and synchronization "
|
||||
"timelines supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-input features mapping */
|
||||
qmp_virtio_feature_map_t virtio_input_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-net features mapping */
|
||||
qmp_virtio_feature_map_t virtio_net_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CSUM, \
|
||||
"VIRTIO_NET_F_CSUM: Device handling packets with partial checksum "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_CSUM, \
|
||||
"VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial "
|
||||
"checksum supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
|
||||
"VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading "
|
||||
"reconfig. supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MTU, \
|
||||
"VIRTIO_NET_F_MTU: Device max MTU reporting supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MAC, \
|
||||
"VIRTIO_NET_F_MAC: Device has given MAC address"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO4, \
|
||||
"VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO6, \
|
||||
"VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ECN, \
|
||||
"VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UFO, \
|
||||
"VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO4, \
|
||||
"VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO6, \
|
||||
"VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_ECN, \
|
||||
"VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HOST_UFO, \
|
||||
"VIRTIO_NET_F_HOST_UFO: Device can receive UFO"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MRG_RXBUF, \
|
||||
"VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_STATUS, \
|
||||
"VIRTIO_NET_F_STATUS: Configuration status field available"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VQ, \
|
||||
"VIRTIO_NET_F_CTRL_VQ: Control channel available"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX, \
|
||||
"VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VLAN, \
|
||||
"VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX_EXTRA, \
|
||||
"VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ANNOUNCE, \
|
||||
"VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_MQ, \
|
||||
"VIRTIO_NET_F_MQ: Multiqueue with automatic receive steering "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_CTRL_MAC_ADDR, \
|
||||
"VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control "
|
||||
"channel"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_HASH_REPORT, \
|
||||
"VIRTIO_NET_F_HASH_REPORT: Hash reporting supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_RSS, \
|
||||
"VIRTIO_NET_F_RSS: RSS RX steering supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_RSC_EXT, \
|
||||
"VIRTIO_NET_F_RSC_EXT: Extended coalescing info supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_STANDBY, \
|
||||
"VIRTIO_NET_F_STANDBY: Device acting as standby for primary "
|
||||
"device with same MAC addr. supported"),
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_SPEED_DUPLEX, \
|
||||
"VIRTIO_NET_F_SPEED_DUPLEX: Device set linkspeed and duplex"),
|
||||
#ifndef VIRTIO_NET_NO_LEGACY
|
||||
FEATURE_ENTRY(VIRTIO_NET_F_GSO, \
|
||||
"VIRTIO_NET_F_GSO: Handling GSO-type packets supported"),
|
||||
#endif /* !VIRTIO_NET_NO_LEGACY */
|
||||
FEATURE_ENTRY(VHOST_NET_F_VIRTIO_NET_HDR, \
|
||||
"VHOST_NET_F_VIRTIO_NET_HDR: Virtio-net headers for RX and TX "
|
||||
"packets supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-scsi features mapping */
|
||||
qmp_virtio_feature_map_t virtio_scsi_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_INOUT, \
|
||||
"VIRTIO_SCSI_F_INOUT: Requests including read and writable data "
|
||||
"buffers suppoted"),
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_HOTPLUG, \
|
||||
"VIRTIO_SCSI_F_HOTPLUG: Reporting and handling hot-plug events "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_CHANGE, \
|
||||
"VIRTIO_SCSI_F_CHANGE: Reporting and handling LUN changes "
|
||||
"supported"),
|
||||
FEATURE_ENTRY(VIRTIO_SCSI_F_T10_PI, \
|
||||
"VIRTIO_SCSI_F_T10_PI: T10 info included in request header"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio/vhost-user-fs features mapping */
|
||||
qmp_virtio_feature_map_t virtio_fs_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio/vhost-user-i2c features mapping */
|
||||
qmp_virtio_feature_map_t virtio_i2c_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, \
|
||||
"VIRTIO_I2C_F_ZERO_LEGNTH_REQUEST: Zero length requests supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio/vhost-vsock features mapping */
|
||||
qmp_virtio_feature_map_t virtio_vsock_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_VSOCK_F_SEQPACKET, \
|
||||
"VIRTIO_VSOCK_F_SEQPACKET: SOCK_SEQPACKET supported"),
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-balloon features mapping */
|
||||
qmp_virtio_feature_map_t virtio_balloon_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_MUST_TELL_HOST, \
|
||||
"VIRTIO_BALLOON_F_MUST_TELL_HOST: Tell host before reclaiming "
|
||||
"pages"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_STATS_VQ, \
|
||||
"VIRTIO_BALLOON_F_STATS_VQ: Guest memory stats VQ available"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_DEFLATE_ON_OOM, \
|
||||
"VIRTIO_BALLOON_F_DEFLATE_ON_OOM: Deflate balloon when guest OOM"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_FREE_PAGE_HINT, \
|
||||
"VIRTIO_BALLOON_F_FREE_PAGE_HINT: VQ reporting free pages enabled"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_PAGE_POISON, \
|
||||
"VIRTIO_BALLOON_F_PAGE_POISON: Guest page poisoning enabled"),
|
||||
FEATURE_ENTRY(VIRTIO_BALLOON_F_REPORTING, \
|
||||
"VIRTIO_BALLOON_F_REPORTING: Page reporting VQ enabled"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-crypto features mapping */
|
||||
qmp_virtio_feature_map_t virtio_crypto_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-iommu features mapping */
|
||||
qmp_virtio_feature_map_t virtio_iommu_feature_map[] = {
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_INPUT_RANGE, \
|
||||
"VIRTIO_IOMMU_F_INPUT_RANGE: Range of available virtual addrs. "
|
||||
"available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_DOMAIN_RANGE, \
|
||||
"VIRTIO_IOMMU_F_DOMAIN_RANGE: Number of supported domains "
|
||||
"available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_MAP_UNMAP, \
|
||||
"VIRTIO_IOMMU_F_MAP_UNMAP: Map and unmap requests available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS, \
|
||||
"VIRTIO_IOMMU_F_BYPASS: Endpoints not attached to domains are in "
|
||||
"bypass mode"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_PROBE, \
|
||||
"VIRTIO_IOMMU_F_PROBE: Probe requests available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_MMIO, \
|
||||
"VIRTIO_IOMMU_F_MMIO: VIRTIO_IOMMU_MAP_F_MMIO flag available"),
|
||||
FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS_CONFIG, \
|
||||
"VIRTIO_IOMMU_F_BYPASS_CONFIG: Bypass field of IOMMU config "
|
||||
"available"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-mem features mapping */
|
||||
qmp_virtio_feature_map_t virtio_mem_feature_map[] = {
|
||||
#ifndef CONFIG_ACPI
|
||||
FEATURE_ENTRY(VIRTIO_MEM_F_ACPI_PXM, \
|
||||
"VIRTIO_MEM_F_ACPI_PXM: node_id is an ACPI PXM and is valid"),
|
||||
#endif /* !CONFIG_ACPI */
|
||||
FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \
|
||||
"VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be "
|
||||
"accessed"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/* virtio-rng features mapping */
|
||||
qmp_virtio_feature_map_t virtio_rng_feature_map[] = {
|
||||
FEATURE_ENTRY(VHOST_F_LOG_ALL, \
|
||||
"VHOST_F_LOG_ALL: Logging write descriptors supported"),
|
||||
FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
|
||||
"VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
|
||||
"negotiation supported"),
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
/*
|
||||
* The alignment to use between consumer and producer parts of vring.
|
||||
* x86 pagesize again. This is the default, used by transports like PCI
|
||||
|
@ -2551,195 +2145,6 @@ void virtio_reset(void *opaque)
|
|||
}
|
||||
}
|
||||
|
||||
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldub_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = lduw_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldl_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stb_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stw_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stl_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldub_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = lduw_le_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return (uint32_t)-1;
|
||||
}
|
||||
|
||||
k->get_config(vdev, vdev->config);
|
||||
|
||||
val = ldl_le_p(vdev->config + addr);
|
||||
return val;
|
||||
}
|
||||
|
||||
void virtio_config_modern_writeb(VirtIODevice *vdev,
|
||||
uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint8_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stb_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_modern_writew(VirtIODevice *vdev,
|
||||
uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint16_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stw_le_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_config_modern_writel(VirtIODevice *vdev,
|
||||
uint32_t addr, uint32_t data)
|
||||
{
|
||||
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
|
||||
uint32_t val = data;
|
||||
|
||||
if (addr + sizeof(val) > vdev->config_len) {
|
||||
return;
|
||||
}
|
||||
|
||||
stl_le_p(vdev->config + addr, val);
|
||||
|
||||
if (k->set_config) {
|
||||
k->set_config(vdev, vdev->config);
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
|
||||
{
|
||||
if (!vdev->vq[n].vring.num) {
|
||||
|
@ -4457,203 +3862,6 @@ static VirtIODevice *virtio_device_find(const char *path)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#define CONVERT_FEATURES(type, map, is_status, bitmap) \
|
||||
({ \
|
||||
type *list = NULL; \
|
||||
type *node; \
|
||||
for (i = 0; map[i].virtio_bit != -1; i++) { \
|
||||
if (is_status) { \
|
||||
bit = map[i].virtio_bit; \
|
||||
} \
|
||||
else { \
|
||||
bit = 1ULL << map[i].virtio_bit; \
|
||||
} \
|
||||
if ((bitmap & bit) == 0) { \
|
||||
continue; \
|
||||
} \
|
||||
node = g_new0(type, 1); \
|
||||
node->value = g_strdup(map[i].feature_desc); \
|
||||
node->next = list; \
|
||||
list = node; \
|
||||
bitmap ^= bit; \
|
||||
} \
|
||||
list; \
|
||||
})
|
||||
|
||||
static VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap)
|
||||
{
|
||||
VirtioDeviceStatus *status;
|
||||
uint8_t bit;
|
||||
int i;
|
||||
|
||||
status = g_new0(VirtioDeviceStatus, 1);
|
||||
status->statuses = CONVERT_FEATURES(strList, virtio_config_status_map,
|
||||
1, bitmap);
|
||||
status->has_unknown_statuses = bitmap != 0;
|
||||
if (status->has_unknown_statuses) {
|
||||
status->unknown_statuses = bitmap;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap)
|
||||
{
|
||||
VhostDeviceProtocols *vhu_protocols;
|
||||
uint64_t bit;
|
||||
int i;
|
||||
|
||||
vhu_protocols = g_new0(VhostDeviceProtocols, 1);
|
||||
vhu_protocols->protocols =
|
||||
CONVERT_FEATURES(strList,
|
||||
vhost_user_protocol_map, 0, bitmap);
|
||||
vhu_protocols->has_unknown_protocols = bitmap != 0;
|
||||
if (vhu_protocols->has_unknown_protocols) {
|
||||
vhu_protocols->unknown_protocols = bitmap;
|
||||
}
|
||||
|
||||
return vhu_protocols;
|
||||
}
|
||||
|
||||
static VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id,
|
||||
uint64_t bitmap)
|
||||
{
|
||||
VirtioDeviceFeatures *features;
|
||||
uint64_t bit;
|
||||
int i;
|
||||
|
||||
features = g_new0(VirtioDeviceFeatures, 1);
|
||||
features->has_dev_features = true;
|
||||
|
||||
/* transport features */
|
||||
features->transports = CONVERT_FEATURES(strList, virtio_transport_map, 0,
|
||||
bitmap);
|
||||
|
||||
/* device features */
|
||||
switch (device_id) {
|
||||
#ifdef CONFIG_VIRTIO_SERIAL
|
||||
case VIRTIO_ID_CONSOLE:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_serial_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_BLK
|
||||
case VIRTIO_ID_BLOCK:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_blk_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_GPU
|
||||
case VIRTIO_ID_GPU:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_gpu_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_NET
|
||||
case VIRTIO_ID_NET:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_net_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_SCSI
|
||||
case VIRTIO_ID_SCSI:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_scsi_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_BALLOON
|
||||
case VIRTIO_ID_BALLOON:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_balloon_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_IOMMU
|
||||
case VIRTIO_ID_IOMMU:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_iommu_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_INPUT
|
||||
case VIRTIO_ID_INPUT:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_input_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VHOST_USER_FS
|
||||
case VIRTIO_ID_FS:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_fs_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VHOST_VSOCK
|
||||
case VIRTIO_ID_VSOCK:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_vsock_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_CRYPTO
|
||||
case VIRTIO_ID_CRYPTO:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_crypto_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_MEM
|
||||
case VIRTIO_ID_MEM:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_mem_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_I2C_ADAPTER
|
||||
case VIRTIO_ID_I2C_ADAPTER:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_i2c_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_VIRTIO_RNG
|
||||
case VIRTIO_ID_RNG:
|
||||
features->dev_features =
|
||||
CONVERT_FEATURES(strList, virtio_rng_feature_map, 0, bitmap);
|
||||
break;
|
||||
#endif
|
||||
/* No features */
|
||||
case VIRTIO_ID_9P:
|
||||
case VIRTIO_ID_PMEM:
|
||||
case VIRTIO_ID_IOMEM:
|
||||
case VIRTIO_ID_RPMSG:
|
||||
case VIRTIO_ID_CLOCK:
|
||||
case VIRTIO_ID_MAC80211_WLAN:
|
||||
case VIRTIO_ID_MAC80211_HWSIM:
|
||||
case VIRTIO_ID_RPROC_SERIAL:
|
||||
case VIRTIO_ID_MEMORY_BALLOON:
|
||||
case VIRTIO_ID_CAIF:
|
||||
case VIRTIO_ID_SIGNAL_DIST:
|
||||
case VIRTIO_ID_PSTORE:
|
||||
case VIRTIO_ID_SOUND:
|
||||
case VIRTIO_ID_BT:
|
||||
case VIRTIO_ID_RPMB:
|
||||
case VIRTIO_ID_VIDEO_ENCODER:
|
||||
case VIRTIO_ID_VIDEO_DECODER:
|
||||
case VIRTIO_ID_SCMI:
|
||||
case VIRTIO_ID_NITRO_SEC_MOD:
|
||||
case VIRTIO_ID_WATCHDOG:
|
||||
case VIRTIO_ID_CAN:
|
||||
case VIRTIO_ID_DMABUF:
|
||||
case VIRTIO_ID_PARAM_SERV:
|
||||
case VIRTIO_ID_AUDIO_POLICY:
|
||||
case VIRTIO_ID_GPIO:
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
features->has_unknown_dev_features = bitmap != 0;
|
||||
if (features->has_unknown_dev_features) {
|
||||
features->unknown_dev_features = bitmap;
|
||||
}
|
||||
|
||||
return features;
|
||||
}
|
||||
|
||||
VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp)
|
||||
{
|
||||
VirtIODevice *vdev;
|
||||
|
|
|
@ -16,3 +16,7 @@ spapr_watchdog_stop(uint64_t num, uint64_t ret) "num=%" PRIu64 " ret=%" PRId64
|
|||
spapr_watchdog_query(uint64_t caps) "caps=0x%" PRIx64
|
||||
spapr_watchdog_query_lpm(uint64_t caps) "caps=0x%" PRIx64
|
||||
spapr_watchdog_expired(uint64_t num, unsigned action) "num=%" PRIu64 " action=%u"
|
||||
|
||||
# watchdog.c
|
||||
watchdog_perform_action(unsigned int action) "action=%u"
|
||||
watchdog_set_action(unsigned int action) "action=%u"
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include "sysemu/watchdog.h"
|
||||
#include "hw/nmi.h"
|
||||
#include "qemu/help_option.h"
|
||||
#include "trace.h"
|
||||
|
||||
static WatchdogAction watchdog_action = WATCHDOG_ACTION_RESET;
|
||||
|
||||
|
@ -43,6 +44,8 @@ WatchdogAction get_watchdog_action(void)
|
|||
*/
|
||||
void watchdog_perform_action(void)
|
||||
{
|
||||
trace_watchdog_perform_action(watchdog_action);
|
||||
|
||||
switch (watchdog_action) {
|
||||
case WATCHDOG_ACTION_RESET: /* same as 'system_reset' in monitor */
|
||||
qapi_event_send_watchdog(WATCHDOG_ACTION_RESET);
|
||||
|
@ -89,4 +92,5 @@ void watchdog_perform_action(void)
|
|||
void qmp_watchdog_set_action(WatchdogAction action, Error **errp)
|
||||
{
|
||||
watchdog_action = action;
|
||||
trace_watchdog_set_action(watchdog_action);
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
#include "hw/acpi/pcihp.h"
|
||||
#include "hw/acpi/memory_hotplug.h"
|
||||
#include "hw/acpi/acpi_dev_interface.h"
|
||||
#include "hw/acpi/tco.h"
|
||||
#include "hw/acpi/ich9_tco.h"
|
||||
|
||||
#define ACPI_PCIHP_ADDR_ICH9 0x0cc0
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* QEMU ICH9 TCO emulation
|
||||
* QEMU ICH9 TCO emulation (total cost of ownership)
|
||||
*
|
||||
* Copyright (c) 2015 Paulo Alcantara <pcacjr@zytor.com>
|
||||
*
|
|
@ -379,6 +379,9 @@ struct MachineState {
|
|||
} \
|
||||
type_init(machine_initfn##_register_types)
|
||||
|
||||
extern GlobalProperty hw_compat_7_2[];
|
||||
extern const size_t hw_compat_7_2_len;
|
||||
|
||||
extern GlobalProperty hw_compat_7_1[];
|
||||
extern const size_t hw_compat_7_1_len;
|
||||
|
||||
|
|
|
@ -200,6 +200,9 @@ void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
|
|||
/* sgx.c */
|
||||
void pc_machine_init_sgx_epc(PCMachineState *pcms);
|
||||
|
||||
extern GlobalProperty pc_compat_7_2[];
|
||||
extern const size_t pc_compat_7_2_len;
|
||||
|
||||
extern GlobalProperty pc_compat_7_1[];
|
||||
extern const size_t pc_compat_7_1_len;
|
||||
|
||||
|
|
|
@ -251,15 +251,7 @@ struct PCIDeviceClass {
|
|||
uint16_t subsystem_vendor_id; /* only for header type = 0 */
|
||||
uint16_t subsystem_id; /* only for header type = 0 */
|
||||
|
||||
/*
|
||||
* pci-to-pci bridge or normal device.
|
||||
* This doesn't mean pci host switch.
|
||||
* When card bus bridge is supported, this would be enhanced.
|
||||
*/
|
||||
bool is_bridge;
|
||||
|
||||
/* rom bar */
|
||||
const char *romfile;
|
||||
const char *romfile; /* rom bar */
|
||||
};
|
||||
|
||||
typedef void (*PCIINTxRoutingNotifier)(PCIDevice *dev);
|
||||
|
|
|
@ -53,6 +53,7 @@ struct PCIBridgeWindows {
|
|||
|
||||
#define TYPE_PCI_BRIDGE "base-pci-bridge"
|
||||
OBJECT_DECLARE_SIMPLE_TYPE(PCIBridge, PCI_BRIDGE)
|
||||
#define IS_PCI_BRIDGE(dev) object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)
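With PCIDeviceClass::is_bridge dropped (see the earlier pci.h hunk), callers are expected to ask the QOM type system instead. A hedged sketch of the idiom, with a hypothetical helper name:

/* object_dynamic_cast() in IS_PCI_BRIDGE() returns NULL on a type mismatch */
static bool hypothetical_is_pci_bridge(PCIDevice *d)
{
    return IS_PCI_BRIDGE(d) != NULL;
}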
|
||||
|
||||
struct PCIBridge {
|
||||
/*< private >*/
|
||||
|
|
|
@ -169,7 +169,6 @@
|
|||
|
||||
#define PCI_VENDOR_ID_DEC 0x1011
|
||||
#define PCI_DEVICE_ID_DEC_21143 0x0019
|
||||
#define PCI_DEVICE_ID_DEC_21154 0x0026
|
||||
|
||||
#define PCI_VENDOR_ID_CIRRUS 0x1013
|
||||
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
/*
 * Vhost Vdpa Device
 *
 * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved.
 *
 * Authors:
 *   Longpeng <longpeng2@huawei.com>
 *
 * Largely based on the "vhost-user-blk.h" implemented by:
 *   Changpeng Liu <changpeng.liu@intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#ifndef _VHOST_VDPA_DEVICE_H
#define _VHOST_VDPA_DEVICE_H

#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qom/object.h"


#define TYPE_VHOST_VDPA_DEVICE "vhost-vdpa-device"
OBJECT_DECLARE_SIMPLE_TYPE(VhostVdpaDevice, VHOST_VDPA_DEVICE)

struct VhostVdpaDevice {
    VirtIODevice parent_obj;
    char *vhostdev;
    int vhostfd;
    int32_t bootindex;
    uint32_t vdev_id;
    uint32_t num_queues;
    struct vhost_dev dev;
    struct vhost_vdpa vdpa;
    VirtQueue **virtqs;
    uint8_t *config;
    int config_size;
    uint16_t queue_size;
    bool started;
    int (*post_init)(VhostVdpaDevice *v, Error **errp);
};

#endif
|
|
@ -19,6 +19,12 @@
|
|||
#include "hw/virtio/virtio.h"
|
||||
#include "standard-headers/linux/vhost_types.h"
|
||||
|
||||
/*
|
||||
* ASID dedicated to map guest's addresses. If SVQ is disabled it maps GPA to
|
||||
* qemu's IOVA. If SVQ is enabled it also maps the SVQ vring here
|
||||
*/
|
||||
#define VHOST_VDPA_GUEST_PA_ASID 0
|
||||
|
||||
typedef struct VhostVDPAHostNotifier {
|
||||
MemoryRegion mr;
|
||||
void *addr;
|
||||
|
@ -29,10 +35,13 @@ typedef struct vhost_vdpa {
|
|||
int index;
|
||||
uint32_t msg_type;
|
||||
bool iotlb_batch_begin_sent;
|
||||
uint32_t address_space_id;
|
||||
MemoryListener listener;
|
||||
struct vhost_vdpa_iova_range iova_range;
|
||||
uint64_t acked_features;
|
||||
bool shadow_vqs_enabled;
|
||||
/* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
|
||||
bool shadow_data;
|
||||
/* IOVA mapping used by the Shadow Virtqueue */
|
||||
VhostIOVATree *iova_tree;
|
||||
GPtrArray *shadow_vqs;
|
||||
|
@ -42,8 +51,9 @@ typedef struct vhost_vdpa {
|
|||
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
|
||||
} VhostVDPA;
|
||||
|
||||
int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
|
||||
void *vaddr, bool readonly);
|
||||
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size);
|
||||
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
|
||||
hwaddr size, void *vaddr, bool readonly);
|
||||
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
|
||||
hwaddr size);
|
||||
|
||||
#endif
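The new asid argument threads an address space id through every mapping call; guest-physical mappings keep using ASID 0. A hedged sketch of a caller (v, iova, size and vaddr are assumed to be set up elsewhere):

int r = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, size,
                           vaddr, false /* readonly: mapping is writable */);
if (r == 0) {
    r = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, size);
}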
|
||||
|
|
|
@ -88,13 +88,32 @@ struct vhost_dev {
|
|||
int vq_index_end;
|
||||
/* if non-zero, minimum required value for max_queues */
|
||||
int num_queues;
|
||||
/**
|
||||
* vhost feature handling requires matching the feature set
|
||||
* offered by a backend which may be a subset of the total
|
||||
* features eventually offered to the guest.
|
||||
*
|
||||
* @features: available features provided by the backend
|
||||
* @acked_features: final negotiated features with front-end driver
|
||||
*
|
||||
* @backend_features: this is used in a couple of places to either
|
||||
* store VHOST_USER_F_PROTOCOL_FEATURES to apply to
|
||||
* VHOST_USER_SET_FEATURES or VHOST_NET_F_VIRTIO_NET_HDR. Its
|
||||
* future use should be discouraged and the variable retired as
|
||||
* it's easy to confuse with the VirtIO backend_features.
|
||||
*/
|
||||
uint64_t features;
|
||||
/** @acked_features: final set of negotiated features */
|
||||
uint64_t acked_features;
|
||||
/** @backend_features: backend specific feature bits */
|
||||
uint64_t backend_features;
|
||||
/** @protocol_features: final negotiated protocol features */
|
||||
|
||||
/**
|
||||
* @protocol_features: is the vhost-user only feature set by
|
||||
* VHOST_USER_SET_PROTOCOL_FEATURES. Protocol features are only
|
||||
* negotiated if VHOST_USER_F_PROTOCOL_FEATURES has been offered
|
||||
* by the backend (see @features).
|
||||
*/
|
||||
uint64_t protocol_features;
|
||||
|
||||
uint64_t max_queues;
|
||||
uint64_t backend_cap;
|
||||
/* @started: is the vhost device started? */
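A rough illustration of how these sets relate (not code from the patch; hdev and vdev are assumed to point at a vhost_dev and its VirtIODevice):

/* which of the backend's offered features the guest driver actually took;
 * after negotiation the vhost code records this set as hdev->acked_features */
uint64_t negotiated = hdev->features & vdev->guest_features;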
|
||||
|
|
|
@ -151,6 +151,8 @@ struct VirtIOPCIProxy {
|
|||
bool disable_modern;
|
||||
bool ignore_backend_features;
|
||||
OnOffAuto disable_legacy;
|
||||
/* Transitional device id */
|
||||
uint16_t trans_devid;
|
||||
uint32_t class_code;
|
||||
uint32_t nvectors;
|
||||
uint32_t dfselect;
|
||||
|
@ -184,6 +186,9 @@ static inline void virtio_pci_disable_modern(VirtIOPCIProxy *proxy)
|
|||
proxy->disable_modern = true;
|
||||
}
|
||||
|
||||
uint16_t virtio_pci_get_trans_devid(uint16_t device_id);
|
||||
uint16_t virtio_pci_get_class_id(uint16_t device_id);
|
||||
|
||||
/*
|
||||
* virtio-input-pci: This extends VirtioPCIProxy.
|
||||
*/
|
||||
|
|
|
@ -93,6 +93,12 @@ enum virtio_device_endian {
|
|||
VIRTIO_DEVICE_ENDIAN_BIG,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct VirtIODevice - common VirtIO structure
|
||||
* @name: name of the device
|
||||
* @status: VirtIO Device Status field
|
||||
*
|
||||
*/
|
||||
struct VirtIODevice
|
||||
{
|
||||
DeviceState parent_obj;
|
||||
|
@ -100,9 +106,20 @@ struct VirtIODevice
|
|||
uint8_t status;
|
||||
uint8_t isr;
|
||||
uint16_t queue_sel;
|
||||
uint64_t guest_features;
|
||||
/**
|
||||
* These fields represent a set of VirtIO features at various
|
||||
* levels of the stack. @host_features indicates the complete
|
||||
* feature set the VirtIO device can offer to the driver.
|
||||
* @guest_features indicates which features the VirtIO driver has
|
||||
* selected by writing to the feature register. Finally
|
||||
* @backend_features represents everything supported by the
|
||||
* backend (e.g. vhost) and could potentially be a subset of the
|
||||
* total feature set offered by QEMU.
|
||||
*/
|
||||
uint64_t host_features;
|
||||
uint64_t guest_features;
|
||||
uint64_t backend_features;
|
||||
|
||||
size_t config_len;
|
||||
void *config;
|
||||
uint16_t config_vector;
|
||||
|
|
174
net/vhost-vdpa.c
|
@ -38,6 +38,8 @@ typedef struct VhostVDPAState {
|
|||
void *cvq_cmd_out_buffer;
|
||||
virtio_net_ctrl_ack *status;
|
||||
|
||||
/* The device always has SVQ enabled */
|
||||
bool always_svq;
|
||||
bool started;
|
||||
} VhostVDPAState;
|
||||
|
||||
|
@ -100,6 +102,8 @@ static const uint64_t vdpa_svq_device_features =
|
|||
BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
|
||||
BIT_ULL(VIRTIO_NET_F_STANDBY);
|
||||
|
||||
#define VHOST_VDPA_NET_CVQ_ASID 1
|
||||
|
||||
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
|
||||
{
|
||||
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
|
||||
|
@ -107,6 +111,23 @@ VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
|
|||
return s->vhost_net;
|
||||
}
|
||||
|
||||
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
|
||||
{
|
||||
uint64_t invalid_dev_features =
|
||||
features & ~vdpa_svq_device_features &
|
||||
/* Transport are all accepted at this point */
|
||||
~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
|
||||
VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
|
||||
|
||||
if (invalid_dev_features) {
|
||||
error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
|
||||
invalid_dev_features);
|
||||
return false;
|
||||
}
|
||||
|
||||
return vhost_svq_valid_features(features, errp);
|
||||
}
|
||||
|
||||
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
|
||||
{
|
||||
uint32_t device_id;
|
||||
|
@@ -224,6 +245,40 @@ static NetClientInfo net_vhost_vdpa_info = {
         .check_peer_type = vhost_vdpa_check_peer_type,
 };
 
+static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
+{
+    struct vhost_vring_state state = {
+        .index = vq_index,
+    };
+    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
+
+    if (unlikely(r < 0)) {
+        error_report("Cannot get VQ %u group: %s", vq_index,
+                     g_strerror(errno));
+        return r;
+    }
+
+    return state.num;
+}
+
+static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
+                                           unsigned vq_group,
+                                           unsigned asid_num)
+{
+    struct vhost_vring_state asid = {
+        .index = vq_group,
+        .num = asid_num,
+    };
+    int r;
+
+    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
+    if (unlikely(r < 0)) {
+        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
+                     asid.index, asid.num, errno, g_strerror(errno));
+    }
+    return r;
+}
+
 static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
 {
     VhostIOVATree *tree = v->iova_tree;
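The two helpers added above wrap the vhost-vdpa uAPI calls that the CVQ isolation relies on: VHOST_VDPA_GET_VRING_GROUP reports which virtqueue group a ring belongs to, and VHOST_VDPA_SET_GROUP_ASID binds a whole group to an address space. A stripped-down, hypothetical user of the same ioctls (error reporting and fd setup omitted; only the kernel definitions from <linux/vhost.h> are assumed):

    #include <sys/ioctl.h>
    #include <linux/vhost.h>  /* VHOST_VDPA_GET_VRING_GROUP, VHOST_VDPA_SET_GROUP_ASID */

    /* Illustrative only: move the group containing virtqueue @vq into @asid. */
    static int move_vq_group_to_asid(int device_fd, unsigned vq, unsigned asid)
    {
        struct vhost_vring_state state = { .index = vq };

        /* Ask the parent device which group this virtqueue belongs to. */
        if (ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state) < 0) {
            return -1;
        }

        /* Bind that whole group to the requested address space id. */
        struct vhost_vring_state bind = { .index = state.num, .num = asid };
        return ioctl(device_fd, VHOST_VDPA_SET_GROUP_ASID, &bind);
    }

The QEMU helpers keep the two steps separate so the caller can first compare group numbers across all virtqueues, as the later hunks show.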
@@ -242,7 +297,7 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
         return;
     }
 
-    r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
+    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
     if (unlikely(r != 0)) {
         error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
     }
@@ -282,8 +337,8 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
         return r;
     }
 
-    r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
-                           !write);
+    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
+                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
     if (unlikely(r < 0)) {
         goto dma_map_err;
     }
@@ -298,11 +353,75 @@ dma_map_err:
 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 {
     VhostVDPAState *s;
-    int r;
+    struct vhost_vdpa *v;
+    uint64_t backend_features;
+    int64_t cvq_group;
+    int cvq_index, r;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
     s = DO_UPCAST(VhostVDPAState, nc, nc);
+    v = &s->vhost_vdpa;
+
+    v->shadow_data = s->always_svq;
+    v->shadow_vqs_enabled = s->always_svq;
+    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
+
+    if (s->always_svq) {
+        /* SVQ is already configured for all virtqueues */
+        goto out;
+    }
+
+    /*
+     * If we early return in these cases SVQ will not be enabled. The migration
+     * will be blocked as long as vhost-vdpa backends will not offer _F_LOG.
+     *
+     * Calling VHOST_GET_BACKEND_FEATURES as they are not available in v->dev
+     * yet.
+     */
+    r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
+    if (unlikely(r < 0)) {
+        error_report("Cannot get vdpa backend_features: %s(%d)",
+                     g_strerror(errno), errno);
+        return -1;
+    }
+    if (!(backend_features & VHOST_BACKEND_F_IOTLB_ASID) ||
+        !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
+        return 0;
+    }
+
+    /*
+     * Check if all the virtqueues of the virtio device are in a different vq
+     * than the last vq. VQ group of last group passed in cvq_group.
+     */
+    cvq_index = v->dev->vq_index_end - 1;
+    cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
+    if (unlikely(cvq_group < 0)) {
+        return cvq_group;
+    }
+    for (int i = 0; i < cvq_index; ++i) {
+        int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);
+
+        if (unlikely(group < 0)) {
+            return group;
+        }
+
+        if (group == cvq_group) {
+            return 0;
+        }
+    }
+
+    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
+    if (unlikely(r < 0)) {
+        return r;
+    }
+
+    v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+                                       v->iova_range.last);
+    v->shadow_vqs_enabled = true;
+    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
+
+out:
     if (!s->vhost_vdpa.shadow_vqs_enabled) {
         return 0;
     }
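Taken together, the rewritten vhost_vdpa_net_cvq_start() makes a three-way choice about shadowing. A compressed, purely illustrative summary of that decision (the enum and function below are invented for this note, not part of the patch):

    #include <stdbool.h>

    enum cvq_shadow_mode {
        CVQ_PASSTHROUGH,     /* CVQ stays in the guest's address space */
        CVQ_SHADOWED_ALONE,  /* only the CVQ is shadowed, in its own ASID */
        CVQ_SHADOWED_ALL,    /* x-svq=on: every queue is already shadowed */
    };

    static enum cvq_shadow_mode pick_cvq_mode(bool always_svq,
                                              bool backend_has_asid,
                                              bool features_ok,
                                              bool cvq_group_is_private)
    {
        if (always_svq) {
            return CVQ_SHADOWED_ALL;
        }
        /* Shadowing just the CVQ needs _F_IOTLB_ASID in the backend,
         * SVQ-compatible features and a CVQ that sits in a virtqueue
         * group no data queue shares. */
        if (backend_has_asid && features_ok && cvq_group_is_private) {
            return CVQ_SHADOWED_ALONE;
        }
        return CVQ_PASSTHROUGH;
    }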
@@ -331,6 +450,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     if (s->vhost_vdpa.shadow_vqs_enabled) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
+        if (!s->always_svq) {
+            /*
+             * If only the CVQ is shadowed we can delete this safely.
+             * If all the VQs are shadows this will be needed by the time the
+             * device is started again to register SVQ vrings and similar.
+             */
+            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+        }
     }
 }
 
@@ -525,14 +652,15 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
 };
 
 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
-                                           const char *device,
-                                           const char *name,
-                                           int vdpa_device_fd,
-                                           int queue_pair_index,
-                                           int nvqs,
-                                           bool is_datapath,
-                                           bool svq,
-                                           VhostIOVATree *iova_tree)
+                                       const char *device,
+                                       const char *name,
+                                       int vdpa_device_fd,
+                                       int queue_pair_index,
+                                       int nvqs,
+                                       bool is_datapath,
+                                       bool svq,
+                                       struct vhost_vdpa_iova_range iova_range,
+                                       VhostIOVATree *iova_tree)
 {
     NetClientState *nc = NULL;
     VhostVDPAState *s;
@@ -550,7 +678,10 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
 
     s->vhost_vdpa.device_fd = vdpa_device_fd;
     s->vhost_vdpa.index = queue_pair_index;
+    s->always_svq = svq;
     s->vhost_vdpa.shadow_vqs_enabled = svq;
+    s->vhost_vdpa.iova_range = iova_range;
+    s->vhost_vdpa.shadow_data = svq;
     s->vhost_vdpa.iova_tree = iova_tree;
     if (!is_datapath) {
         s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
@@ -630,6 +761,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     int vdpa_device_fd;
     g_autofree NetClientState **ncs = NULL;
     g_autoptr(VhostIOVATree) iova_tree = NULL;
+    struct vhost_vdpa_iova_range iova_range;
     NetClientState *nc;
     int queue_pairs, r, i = 0, has_cvq = 0;
 
@@ -673,22 +805,12 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
         return queue_pairs;
     }
 
+    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
     if (opts->x_svq) {
-        struct vhost_vdpa_iova_range iova_range;
-
-        uint64_t invalid_dev_features =
-            features & ~vdpa_svq_device_features &
-            /* Transport are all accepted at this point */
-            ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
-                             VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
-
-        if (invalid_dev_features) {
-            error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
-                       invalid_dev_features);
+        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
             goto err_svq;
         }
 
-        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
         iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
     }
 
@@ -697,7 +819,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     for (i = 0; i < queue_pairs; i++) {
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                      vdpa_device_fd, i, 2, true, opts->x_svq,
-                                     iova_tree);
+                                     iova_range, iova_tree);
         if (!ncs[i])
             goto err;
     }
@@ -705,7 +827,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     if (has_cvq) {
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                  vdpa_device_fd, i, 1, false,
-                                 opts->x_svq, iova_tree);
+                                 opts->x_svq, iova_range, iova_tree);
         if (!nc)
             goto err;
     }

@@ -343,7 +343,7 @@ typedef struct VuVirtq {
     /* Notification enabled? */
     bool notification;
 
-    int inuse;
+    unsigned int inuse;
 
     vu_queue_handler_cb handler;
 
@@ -50,7 +50,7 @@ from qemu.machine import QEMUMachine
 from avocado import skipIf
 from avocado_qemu import QemuBaseTest
 
-deps = ["xorriso"] # dependent tools needed in the test setup/box.
+deps = ["xorriso", "mformat"] # dependent tools needed in the test setup/box.
 supported_platforms = ['x86_64'] # supported test platforms.
 
 
@@ -1,6 +1,8 @@
 # Copyright (c) 2015, Intel Corporation
 # All rights reserved.
 #
+# SPDX-License-Identifier: BSD-3-Clause
+#
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
 #
@@ -24,6 +26,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# This script runs only from the biosbits VM.
+
 """SMBIOS/DMI module."""
 
 import bits
@@ -1,6 +1,8 @@
 # Copyright (c) 2015, Intel Corporation
 # All rights reserved.
 #
+# SPDX-License-Identifier: BSD-3-Clause
+#
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
 #
@@ -24,6 +26,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# This script runs only from the biosbits VM.
+
 """Tests for ACPI"""
 
 import acpi
@@ -1,6 +1,8 @@
 # Copyright (c) 2012, Intel Corporation
 # All rights reserved.
 #
+# SPDX-License-Identifier: BSD-3-Clause
+#
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
 #
@@ -24,6 +26,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# This script runs only from the biosbits VM.
+
 """Tests and helpers for CPUID."""
 
 import bits
@@ -16,7 +16,7 @@
 #include "hw/pci/pci_regs.h"
 #include "hw/i386/ich9.h"
 #include "hw/acpi/ich9.h"
-#include "hw/acpi/tco.h"
+#include "hw/acpi/ich9_tco.h"
 
 #define RCBA_BASE_ADDR 0xfed1c000
 #define PM_IO_BASE_ADDR 0xb000
@@ -60,7 +60,7 @@ static void test_init(TestData *d)
     QTestState *qs;
 
     qs = qtest_initf("-machine q35 %s %s",
-                     d->noreboot ? "" : "-global ICH9-LPC.noreboot=false",
+                     d->noreboot ? "-global ICH9-LPC.noreboot=true" : "",
                      !d->args ? "" : d->args);
     qtest_irq_intercept_in(qs, "ioapic");
 