virtio, vhost: fixes for 2.5
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

This fixes a performance regression with virtio 1, and makes device
stop/start more robust for vhost-user. virtio devices on pcie bus now
have pcie and pm capability, as required by the PCI Express spec.
migration now works better with virtio 9p.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu 12 Nov 2015 14:40:42 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream:
  virtio-9p: add savem handlers
  hw/virtio: Add PCIe capability to virtio devices
  vhost: send SET_VRING_ENABLE at start/stop
  vhost: rename RESET_DEVICE backto RESET_OWNER
  vhost-user: modify SET_LOG_BASE to pass mmap size and offset
  virtio-pci: unbreak queue_enable read
  virtio-pci: introduce pio notification capability for modern device
  virtio-pci: use zero length mmio eventfd for 1.0 notification cap when possible
  KVM: add support for any length io eventfd
  memory: don't try to adjust endianness for zero length eventfd
  virtio-pci: fix 1.0 virtqueue migration

Conflicts:
	include/hw/compat.h
[Fixed a trivial merge conflict in compat.h]

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 8f0da01d18
@@ -98,6 +98,7 @@ typedef struct VhostUserMsg {
         struct vhost_vring_state state;
         struct vhost_vring_addr addr;
         VhostUserMemory memory;
+        VhostUserLog log;
     };
 } QEMU_PACKED VhostUserMsg;
 
@@ -255,10 +256,10 @@ Message types
       as an owner of the session. This can be used on the Slave as a
       "session start" flag.
 
-* VHOST_USER_RESET_DEVICE
+* VHOST_USER_RESET_OWNER
 
       Id: 4
-      Equivalent ioctl: VHOST_RESET_DEVICE
+      Equivalent ioctl: VHOST_RESET_OWNER
       Master payload: N/A
 
       Issued when a new connection is about to be closed. The Master will no
@@ -282,7 +283,12 @@ Message types
       Master payload: u64
       Slave payload: N/A
 
-      Sets the logging base address.
+      Sets logging shared memory space.
+      When slave has VHOST_USER_PROTOCOL_F_LOG_SHMFD protocol
+      feature, the log memory fd is provided in the ancillary data of
+      VHOST_USER_SET_LOG_BASE message, the size and offset of shared
+      memory area provided in the message.
+
 
 * VHOST_USER_SET_LOG_FD
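The revised SET_LOG_BASE flow is easier to see in isolation. Below is a minimal,
self-contained sketch (not code from this series) of how a vhost-user master could
build the message with the new VhostUserLog payload and pass the log memory fd as
ancillary data once the slave has advertised VHOST_USER_PROTOCOL_F_LOG_SHMFD. The
request number, flags value and the send_set_log_base() helper are illustrative
assumptions; only the payload layout follows the structs added in this diff.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    typedef struct VhostUserLog {
        uint64_t mmap_size;
        uint64_t mmap_offset;
    } VhostUserLog;

    typedef struct VhostUserMsg {
        uint32_t request;               /* VHOST_USER_SET_LOG_BASE == 6 */
        uint32_t flags;                 /* protocol version in the low bits */
        uint32_t size;                  /* size of the payload that follows */
        union {
            uint64_t u64;
            VhostUserLog log;
        } payload;
    } __attribute__((packed)) VhostUserMsg;

    static int send_set_log_base(int sock, int log_fd,
                                 uint64_t mmap_size, uint64_t mmap_offset)
    {
        VhostUserMsg msg = {
            .request = 6,                        /* VHOST_USER_SET_LOG_BASE */
            .flags = 0x1,                        /* assumed version field */
            .size = sizeof(msg.payload.log),
            .payload.log.mmap_size = mmap_size,
            .payload.log.mmap_offset = mmap_offset,
        };
        char control[CMSG_SPACE(sizeof(int))];
        /* header (request + flags + size) plus the declared payload size */
        struct iovec iov = { .iov_base = &msg,
                             .iov_len = offsetof(VhostUserMsg, payload) + msg.size };
        struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
                             .msg_control = control,
                             .msg_controllen = sizeof(control) };
        struct cmsghdr *cmsg;

        memset(control, 0, sizeof(control));
        cmsg = CMSG_FIRSTHDR(&mh);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;            /* the log fd rides as ancillary data */
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &log_fd, sizeof(int));

        return sendmsg(sock, &mh, 0) < 0 ? -1 : 0;
    }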
@@ -43,6 +43,16 @@ static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config)
     g_free(cfg);
 }
 
+static void virtio_9p_save(QEMUFile *f, void *opaque)
+{
+    virtio_save(VIRTIO_DEVICE(opaque), f);
+}
+
+static int virtio_9p_load(QEMUFile *f, void *opaque, int version_id)
+{
+    return virtio_load(VIRTIO_DEVICE(opaque), f, version_id);
+}
+
 static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);

@@ -130,6 +140,7 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
     }
     v9fs_path_free(&path);
 
+    register_savevm(dev, "virtio-9p", -1, 1, virtio_9p_save, virtio_9p_load, s);
     return;
 out:
     g_free(s->ctx.fs_root);
@@ -156,7 +156,7 @@ static int vhost_kernel_set_owner(struct vhost_dev *dev)
 
 static int vhost_kernel_reset_device(struct vhost_dev *dev)
 {
-    return vhost_kernel_call(dev, VHOST_RESET_DEVICE, NULL);
+    return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
 }
 
 static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
@@ -43,7 +43,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_GET_FEATURES = 1,
     VHOST_USER_SET_FEATURES = 2,
     VHOST_USER_SET_OWNER = 3,
-    VHOST_USER_RESET_DEVICE = 4,
+    VHOST_USER_RESET_OWNER = 4,
     VHOST_USER_SET_MEM_TABLE = 5,
     VHOST_USER_SET_LOG_BASE = 6,
     VHOST_USER_SET_LOG_FD = 7,

@@ -75,6 +75,11 @@ typedef struct VhostUserMemory {
     VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
 } VhostUserMemory;
 
+typedef struct VhostUserLog {
+    uint64_t mmap_size;
+    uint64_t mmap_offset;
+} VhostUserLog;
+
 typedef struct VhostUserMsg {
     VhostUserRequest request;
 
@@ -89,6 +94,7 @@ typedef struct VhostUserMsg {
         struct vhost_vring_state state;
         struct vhost_vring_addr addr;
         VhostUserMemory memory;
+        VhostUserLog log;
     } payload;
 } QEMU_PACKED VhostUserMsg;
 
@@ -157,7 +163,7 @@ static bool vhost_user_one_time_request(VhostUserRequest request)
 {
     switch (request) {
     case VHOST_USER_SET_OWNER:
-    case VHOST_USER_RESET_DEVICE:
+    case VHOST_USER_RESET_OWNER:
     case VHOST_USER_SET_MEM_TABLE:
     case VHOST_USER_GET_QUEUE_NUM:
         return true;

@@ -200,8 +206,9 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
     VhostUserMsg msg = {
         .request = VHOST_USER_SET_LOG_BASE,
         .flags = VHOST_USER_VERSION,
-        .payload.u64 = base,
-        .size = sizeof(msg.payload.u64),
+        .payload.log.mmap_size = log->size,
+        .payload.log.mmap_offset = 0,
+        .size = sizeof(msg.payload.log),
     };
 
     if (shmfd && log->fd != -1) {

@@ -486,7 +493,7 @@ static int vhost_user_set_owner(struct vhost_dev *dev)
 static int vhost_user_reset_device(struct vhost_dev *dev)
 {
     VhostUserMsg msg = {
-        .request = VHOST_USER_RESET_DEVICE,
+        .request = VHOST_USER_RESET_OWNER,
         .flags = VHOST_USER_VERSION,
     };
 
@@ -1226,6 +1226,11 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         }
     }
 
+    if (hdev->vhost_ops->vhost_set_vring_enable) {
+        /* only enable first vq pair by default */
+        hdev->vhost_ops->vhost_set_vring_enable(hdev, hdev->vq_index == 0);
+    }
+
     return 0;
 fail_log:
     vhost_log_put(hdev, false);

@@ -1256,6 +1261,10 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
                              hdev->vq_index + i);
     }
 
+    if (hdev->vhost_ops->vhost_set_vring_enable) {
+        hdev->vhost_ops->vhost_set_vring_enable(hdev, 0);
+    }
+
     vhost_log_put(hdev, true);
     hdev->started = false;
     hdev->log = NULL;
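For context, the vhost_set_vring_enable hook that vhost_dev_start()/stop() now call
is a per-backend operation. A rough sketch of what the vhost-user variant can look
like is shown below; it assumes the VhostUserMsg layout from the hunks above and a
vhost_user_write() transport helper, so treat it as an illustration of the protocol
exchange rather than the exact code merged here.

    static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
    {
        int i;

        for (i = 0; i < dev->nvqs; i++) {
            /* one VHOST_USER_SET_VRING_ENABLE message per ring of this device */
            struct vhost_vring_state state = {
                .index = dev->vq_index + i,
                .num   = enable,        /* 1 = enable, 0 = disable */
            };
            VhostUserMsg msg = {
                .request = VHOST_USER_SET_VRING_ENABLE,
                .flags = VHOST_USER_VERSION,
                .payload.state = state,
                .size = sizeof(msg.payload.state),
            };

            if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
                return -1;
            }
        }

        return 0;
    }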
@@ -86,6 +86,129 @@ static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
     qemu_put_be16(f, vdev->config_vector);
 }
 
+static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
+                                               QEMUFile *f)
+{
+    vq->num = qemu_get_be16(f);
+    vq->enabled = qemu_get_be16(f);
+    vq->desc[0] = qemu_get_be32(f);
+    vq->desc[1] = qemu_get_be32(f);
+    vq->avail[0] = qemu_get_be32(f);
+    vq->avail[1] = qemu_get_be32(f);
+    vq->used[0] = qemu_get_be32(f);
+    vq->used[1] = qemu_get_be32(f);
+}
+
+static bool virtio_pci_has_extra_state(DeviceState *d)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
+}
+
+static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+{
+    VirtIOPCIProxy *proxy = pv;
+    int i;
+
+    proxy->dfselect = qemu_get_be32(f);
+    proxy->gfselect = qemu_get_be32(f);
+    proxy->guest_features[0] = qemu_get_be32(f);
+    proxy->guest_features[1] = qemu_get_be32(f);
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+        virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
+    }
+
+    return 0;
+}
+
+static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
+                                               QEMUFile *f)
+{
+    qemu_put_be16(f, vq->num);
+    qemu_put_be16(f, vq->enabled);
+    qemu_put_be32(f, vq->desc[0]);
+    qemu_put_be32(f, vq->desc[1]);
+    qemu_put_be32(f, vq->avail[0]);
+    qemu_put_be32(f, vq->avail[1]);
+    qemu_put_be32(f, vq->used[0]);
+    qemu_put_be32(f, vq->used[1]);
+}
+
+static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+{
+    VirtIOPCIProxy *proxy = pv;
+    int i;
+
+    qemu_put_be32(f, proxy->dfselect);
+    qemu_put_be32(f, proxy->gfselect);
+    qemu_put_be32(f, proxy->guest_features[0]);
+    qemu_put_be32(f, proxy->guest_features[1]);
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+        virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
+    }
+}
+
+static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
+    .name = "virtqueue_state",
+    .get = get_virtio_pci_modern_state,
+    .put = put_virtio_pci_modern_state,
+};
+
+static bool virtio_pci_modern_state_needed(void *opaque)
+{
+    VirtIOPCIProxy *proxy = opaque;
+
+    return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+}
+
+static const VMStateDescription vmstate_virtio_pci_modern_state = {
+    .name = "virtio_pci/modern_state",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = &virtio_pci_modern_state_needed,
+    .fields = (VMStateField[]) {
+        {
+            .name = "modern_state",
+            .version_id = 0,
+            .field_exists = NULL,
+            .size = 0,
+            .info = &vmstate_info_virtio_pci_modern_state,
+            .flags = VMS_SINGLE,
+            .offset = 0,
+        },
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_virtio_pci = {
+    .name = "virtio_pci",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription*[]) {
+        &vmstate_virtio_pci_modern_state,
+        NULL
+    }
+};
+
+static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
+}
+
+static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
+}
+
 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
@@ -133,6 +256,7 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
     if (vector != VIRTIO_NO_VECTOR) {
         return msix_vector_use(&proxy->pci_dev, vector);
     }
+
     return 0;
 }
 
@@ -146,7 +270,10 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
     EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
     bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
     bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
+    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
     MemoryRegion *modern_mr = &proxy->notify.mr;
+    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
     MemoryRegion *legacy_mr = &proxy->bar;
     hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                          virtio_get_queue_index(vq);
@@ -162,8 +289,17 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
         }
         virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
         if (modern) {
-            memory_region_add_eventfd(modern_mr, modern_addr, 2,
-                                      true, n, notifier);
+            if (fast_mmio) {
+                memory_region_add_eventfd(modern_mr, modern_addr, 0,
+                                          false, n, notifier);
+            } else {
+                memory_region_add_eventfd(modern_mr, modern_addr, 2,
+                                          false, n, notifier);
+            }
+            if (modern_pio) {
+                memory_region_add_eventfd(modern_notify_mr, 0, 2,
+                                          true, n, notifier);
+            }
         }
         if (legacy) {
             memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
@@ -171,8 +307,17 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
         }
     } else {
         if (modern) {
-            memory_region_del_eventfd(modern_mr, modern_addr, 2,
-                                      true, n, notifier);
+            if (fast_mmio) {
+                memory_region_del_eventfd(modern_mr, modern_addr, 0,
+                                          false, n, notifier);
+            } else {
+                memory_region_del_eventfd(modern_mr, modern_addr, 2,
+                                          false, n, notifier);
+            }
+            if (modern_pio) {
+                memory_region_del_eventfd(modern_notify_mr, 0, 2,
+                                          true, n, notifier);
+            }
         }
         if (legacy) {
             memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
@@ -1239,6 +1384,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
                        proxy->vqs[vdev->queue_sel].avail[0],
                        ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                        proxy->vqs[vdev->queue_sel].used[0]);
+        proxy->vqs[vdev->queue_sel].enabled = 1;
         break;
     case VIRTIO_PCI_COMMON_Q_DESCLO:
         proxy->vqs[vdev->queue_sel].desc[0] = val;
@@ -1281,6 +1427,17 @@ static void virtio_pci_notify_write(void *opaque, hwaddr addr,
     }
 }
 
+static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
+                                        uint64_t val, unsigned size)
+{
+    VirtIODevice *vdev = opaque;
+    unsigned queue = val;
+
+    if (queue < VIRTIO_QUEUE_MAX) {
+        virtio_queue_notify(vdev, queue);
+    }
+}
+
 static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                     unsigned size)
 {
@@ -1374,6 +1531,16 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
         },
         .endianness = DEVICE_LITTLE_ENDIAN,
     };
+    static const MemoryRegionOps notify_pio_ops = {
+        .read = virtio_pci_notify_read,
+        .write = virtio_pci_notify_write_pio,
+        .impl = {
+            .min_access_size = 1,
+            .max_access_size = 4,
+        },
+        .endianness = DEVICE_LITTLE_ENDIAN,
+    };
+
 
     memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                           &common_ops,
@@ -1398,30 +1565,60 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
                           virtio_bus_get_device(&proxy->bus),
                           "virtio-pci-notify",
                           proxy->notify.size);
+
+    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
+                          &notify_pio_ops,
+                          virtio_bus_get_device(&proxy->bus),
+                          "virtio-pci-notify-pio",
+                          proxy->notify.size);
 }
 
 static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                          VirtIOPCIRegion *region,
-                                         struct virtio_pci_cap *cap)
+                                         struct virtio_pci_cap *cap,
+                                         MemoryRegion *mr,
+                                         uint8_t bar)
 {
-    memory_region_add_subregion(&proxy->modern_bar,
-                                region->offset,
-                                &region->mr);
+    memory_region_add_subregion(mr, region->offset, &region->mr);
 
     cap->cfg_type = region->type;
-    cap->bar = proxy->modern_mem_bar;
+    cap->bar = bar;
     cap->offset = cpu_to_le32(region->offset);
     cap->length = cpu_to_le32(region->size);
     virtio_pci_add_mem_cap(proxy, cap);
+
 }
 
-static void virtio_pci_modern_region_unmap(VirtIOPCIProxy *proxy,
-                                           VirtIOPCIRegion *region)
+static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
+                                             VirtIOPCIRegion *region,
+                                             struct virtio_pci_cap *cap)
+{
+    virtio_pci_modern_region_map(proxy, region, cap,
+                                 &proxy->modern_bar, proxy->modern_mem_bar);
+}
+
+static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
+                                            VirtIOPCIRegion *region,
+                                            struct virtio_pci_cap *cap)
+{
+    virtio_pci_modern_region_map(proxy, region, cap,
+                                 &proxy->io_bar, proxy->modern_io_bar);
+}
+
+static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
+                                               VirtIOPCIRegion *region)
 {
     memory_region_del_subregion(&proxy->modern_bar,
                                 &region->mr);
 }
 
+static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
+                                              VirtIOPCIRegion *region)
+{
+    memory_region_del_subregion(&proxy->io_bar,
+                                &region->mr);
+}
+
 /* This is called by virtio-bus just after the device is plugged. */
 static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
 {
@@ -1429,6 +1626,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
     VirtioBusState *bus = &proxy->bus;
     bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
     bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
     uint8_t *config;
     uint32_t size;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
@@ -1467,16 +1665,31 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
             .cap.cap_len = sizeof cfg,
             .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
         };
-        struct virtio_pci_cfg_cap *cfg_mask;
+        struct virtio_pci_notify_cap notify_pio = {
+            .cap.cap_len = sizeof notify,
+            .notify_off_multiplier = cpu_to_le32(0x0),
+        };
 
-        /* TODO: add io access for speed */
+        struct virtio_pci_cfg_cap *cfg_mask;
 
         virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
         virtio_pci_modern_regions_init(proxy);
-        virtio_pci_modern_region_map(proxy, &proxy->common, &cap);
-        virtio_pci_modern_region_map(proxy, &proxy->isr, &cap);
-        virtio_pci_modern_region_map(proxy, &proxy->device, &cap);
-        virtio_pci_modern_region_map(proxy, &proxy->notify, &notify.cap);
+
+        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
+        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
+        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
+        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
+
+        if (modern_pio) {
+            memory_region_init(&proxy->io_bar, OBJECT(proxy),
+                               "virtio-pci-io", 0x4);
+
+            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
+                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
+
+            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
+                                            &notify_pio.cap);
+        }
 
         pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
                          PCI_BASE_ADDRESS_SPACE_MEMORY |
@@ -1532,14 +1745,18 @@ static void virtio_pci_device_unplugged(DeviceState *d)
 {
     VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
     bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
 
     virtio_pci_stop_ioeventfd(proxy);
 
     if (modern) {
-        virtio_pci_modern_region_unmap(proxy, &proxy->common);
-        virtio_pci_modern_region_unmap(proxy, &proxy->isr);
-        virtio_pci_modern_region_unmap(proxy, &proxy->device);
-        virtio_pci_modern_region_unmap(proxy, &proxy->notify);
+        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
+        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
+        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
+        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
+        if (modern_pio) {
+            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
+        }
     }
 }
 
@@ -1559,6 +1776,7 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
      */
     proxy->legacy_io_bar = 0;
     proxy->msix_bar = 1;
+    proxy->modern_io_bar = 2;
    proxy->modern_mem_bar = 4;
 
     proxy->common.offset = 0x0;
@@ -1578,6 +1796,10 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
                          QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
     proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
 
+    proxy->notify_pio.offset = 0x0;
+    proxy->notify_pio.size = 0x4;
+    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
+
     /* subclasses can enforce modern, so do this unconditionally */
     memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                        2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
@@ -1592,6 +1814,26 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
 
     address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");
 
+    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE)
+        && !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)
+        && pci_bus_is_express(pci_dev->bus)
+        && !pci_bus_is_root(pci_dev->bus)) {
+        int pos;
+
+        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
+        pos = pcie_endpoint_cap_init(pci_dev, 0);
+        assert(pos > 0);
+
+        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
+        assert(pos > 0);
+
+        /*
+         * Indicates that this function complies with revision 1.2 of the
+         * PCI Power Management Interface Specification.
+         */
+        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
+    }
+
     virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
     if (k->realize) {
         k->realize(proxy, errp);
@@ -1610,9 +1852,15 @@ static void virtio_pci_reset(DeviceState *qdev)
 {
     VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
     VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
+    int i;
+
     virtio_pci_stop_ioeventfd(proxy);
     virtio_bus_reset(bus);
     msix_unuse_all_vectors(&proxy->pci_dev);
+
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+        proxy->vqs[i].enabled = 0;
+    }
 }
 
 static Property virtio_pci_properties[] = {
@@ -1622,6 +1870,12 @@ static Property virtio_pci_properties[] = {
                     VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
     DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
+    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
+    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
+    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -2212,6 +2466,9 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
     k->load_config = virtio_pci_load_config;
     k->save_queue = virtio_pci_save_queue;
     k->load_queue = virtio_pci_load_queue;
+    k->save_extra_state = virtio_pci_save_extra_state;
+    k->load_extra_state = virtio_pci_load_extra_state;
+    k->has_extra_state = virtio_pci_has_extra_state;
     k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
     k->set_host_notifier = virtio_pci_set_host_notifier;
     k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
@@ -72,8 +72,19 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
 /* virtio version flags */
 #define VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT 2
 #define VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT 3
+#define VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT 4
 #define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
 #define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
+#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
+
+/* migrate extra state */
+#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT 4
+#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
+
+/* have pio notification for modern device ? */
+#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT 5
+#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
+    (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
 
 typedef struct {
     MSIMessage msg;
@@ -104,6 +115,14 @@ typedef struct VirtIOPCIRegion {
     uint32_t type;
 } VirtIOPCIRegion;
 
+typedef struct VirtIOPCIQueue {
+    uint16_t num;
+    bool enabled;
+    uint32_t desc[2];
+    uint32_t avail[2];
+    uint32_t used[2];
+} VirtIOPCIQueue;
+
 struct VirtIOPCIProxy {
     PCIDevice pci_dev;
     MemoryRegion bar;
@@ -111,11 +130,14 @@ struct VirtIOPCIProxy {
     VirtIOPCIRegion isr;
     VirtIOPCIRegion device;
     VirtIOPCIRegion notify;
+    VirtIOPCIRegion notify_pio;
     MemoryRegion modern_bar;
+    MemoryRegion io_bar;
     MemoryRegion modern_cfg;
     AddressSpace modern_as;
     uint32_t legacy_io_bar;
     uint32_t msix_bar;
+    uint32_t modern_io_bar;
     uint32_t modern_mem_bar;
     int config_cap;
     uint32_t flags;
@@ -124,13 +146,7 @@ struct VirtIOPCIProxy {
     uint32_t dfselect;
     uint32_t gfselect;
     uint32_t guest_features[2];
-    struct {
-        uint16_t num;
-        bool enabled;
-        uint32_t desc[2];
-        uint32_t avail[2];
-        uint32_t used[2];
-    } vqs[VIRTIO_QUEUE_MAX];
+    VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX];
 
     bool ioeventfd_disabled;
     bool ioeventfd_started;
@@ -1116,6 +1116,16 @@ static bool virtio_ringsize_needed(void *opaque)
     return false;
 }
 
+static bool virtio_extra_state_needed(void *opaque)
+{
+    VirtIODevice *vdev = opaque;
+    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+    return k->has_extra_state &&
+           k->has_extra_state(qbus->parent);
+}
+
 static void put_virtqueue_state(QEMUFile *f, void *pv, size_t size)
 {
     VirtIODevice *vdev = pv;
@@ -1210,6 +1220,53 @@ static const VMStateDescription vmstate_virtio_ringsize = {
     }
 };
 
+static int get_extra_state(QEMUFile *f, void *pv, size_t size)
+{
+    VirtIODevice *vdev = pv;
+    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+    if (!k->load_extra_state) {
+        return -1;
+    } else {
+        return k->load_extra_state(qbus->parent, f);
+    }
+}
+
+static void put_extra_state(QEMUFile *f, void *pv, size_t size)
+{
+    VirtIODevice *vdev = pv;
+    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+    k->save_extra_state(qbus->parent, f);
+}
+
+static const VMStateInfo vmstate_info_extra_state = {
+    .name = "virtqueue_extra_state",
+    .get = get_extra_state,
+    .put = put_extra_state,
+};
+
+static const VMStateDescription vmstate_virtio_extra_state = {
+    .name = "virtio/extra_state",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = &virtio_extra_state_needed,
+    .fields = (VMStateField[]) {
+        {
+            .name = "extra_state",
+            .version_id = 0,
+            .field_exists = NULL,
+            .size = 0,
+            .info = &vmstate_info_extra_state,
+            .flags = VMS_SINGLE,
+            .offset = 0,
+        },
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_virtio_device_endian = {
     .name = "virtio/device_endian",
     .version_id = 1,
@@ -1245,6 +1302,7 @@ static const VMStateDescription vmstate_virtio = {
         &vmstate_virtio_64bit_features,
         &vmstate_virtio_virtqueues,
         &vmstate_virtio_ringsize,
+        &vmstate_virtio_extra_state,
         NULL
     }
 };
@@ -10,7 +10,15 @@
         .driver   = "e1000",\
         .property = "extra_mac_registers",\
         .value    = "off",\
-    },
+    },{\
+        .driver   = "virtio-pci",\
+        .property = "x-disable-pcie",\
+        .value    = "on",\
+    },{\
+        .driver   = "virtio-pci",\
+        .property = "migrate-extra",\
+        .value    = "off",\
+    },
 
 #define HW_COMPAT_2_3 \
     {\
@@ -44,9 +44,12 @@ typedef struct VirtioBusClass {
     void (*notify)(DeviceState *d, uint16_t vector);
     void (*save_config)(DeviceState *d, QEMUFile *f);
     void (*save_queue)(DeviceState *d, int n, QEMUFile *f);
+    void (*save_extra_state)(DeviceState *d, QEMUFile *f);
     int (*load_config)(DeviceState *d, QEMUFile *f);
     int (*load_queue)(DeviceState *d, int n, QEMUFile *f);
     int (*load_done)(DeviceState *d, QEMUFile *f);
+    int (*load_extra_state)(DeviceState *d, QEMUFile *f);
+    bool (*has_extra_state)(DeviceState *d);
     bool (*query_guest_notifiers)(DeviceState *d);
     int (*set_guest_notifiers)(DeviceState *d, int nvqs, bool assign);
     int (*set_host_notifier)(DeviceState *d, int n, bool assigned);
@@ -53,6 +53,7 @@ extern bool kvm_gsi_routing_allowed;
 extern bool kvm_gsi_direct_mapping;
 extern bool kvm_readonly_mem_allowed;
 extern bool kvm_direct_msi_allowed;
+extern bool kvm_ioeventfd_any_length_allowed;
 
 #if defined CONFIG_KVM || !defined NEED_CPU_H
 #define kvm_enabled() (kvm_allowed)

@@ -153,6 +154,12 @@ extern bool kvm_direct_msi_allowed;
  */
 #define kvm_direct_msi_enabled() (kvm_direct_msi_allowed)
 
+/**
+ * kvm_ioeventfd_any_length_enabled:
+ * Returns: true if KVM allows any length io eventfd.
+ */
+#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed)
+
 #else
 #define kvm_enabled() (0)
 #define kvm_irqchip_in_kernel() (false)

@@ -166,6 +173,7 @@ extern bool kvm_direct_msi_allowed;
 #define kvm_gsi_direct_mapping() (false)
 #define kvm_readonly_mem_enabled() (false)
 #define kvm_direct_msi_enabled() (false)
+#define kvm_ioeventfd_any_length_enabled() (false)
 #endif
 
 struct kvm_run;
@@ -109,6 +109,7 @@ bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
 bool kvm_vm_attributes_allowed;
 bool kvm_direct_msi_allowed;
+bool kvm_ioeventfd_any_length_allowed;
 
 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),

@@ -1611,6 +1612,9 @@ static int kvm_init(MachineState *ms)
     kvm_vm_attributes_allowed =
         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
 
+    kvm_ioeventfd_any_length_allowed =
+        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
+
     ret = kvm_arch_init(ms, s);
     if (ret < 0) {
         goto err;
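The capability probed above is what allows a zero-length ("any length") ioeventfd:
with it, KVM triggers the eventfd on a write of any width to the registered address,
which is what the fast_mmio notify path in virtio-pci relies on. A minimal sketch
against the raw KVM API (outside QEMU's wrappers; vm_fd and efd are assumed to be an
open KVM VM file descriptor and an eventfd) could look like this:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int add_any_length_ioeventfd(int vm_fd, int efd, uint64_t addr)
    {
        struct kvm_ioeventfd io = {
            .addr  = addr,
            .len   = 0,    /* 0 = match any access size; requires
                              KVM_CAP_IOEVENTFD_ANY_LENGTH */
            .fd    = efd,
            .flags = 0,    /* no datamatch: any value written fires efd */
        };

        return ioctl(vm_fd, KVM_IOEVENTFD, &io);
    }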
@@ -30,6 +30,7 @@ bool kvm_gsi_routing_allowed;
 bool kvm_gsi_direct_mapping;
 bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
+bool kvm_ioeventfd_any_length_allowed;
 
 int kvm_init_vcpu(CPUState *cpu)
 {
@@ -78,7 +78,7 @@ struct vhost_memory {
 #define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
 /* Give up ownership, and reset the device to default values.
  * Allows subsequent call to VHOST_OWNER_SET to succeed. */
-#define VHOST_RESET_DEVICE _IO(VHOST_VIRTIO, 0x02)
+#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
 
 /* Set up/modify memory layout */
 #define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory)
memory.c

@@ -1688,7 +1688,9 @@ void memory_region_add_eventfd(MemoryRegion *mr,
     };
     unsigned i;
 
-    adjust_endianness(mr, &mrfd.data, size);
+    if (size) {
+        adjust_endianness(mr, &mrfd.data, size);
+    }
     memory_region_transaction_begin();
     for (i = 0; i < mr->ioeventfd_nb; ++i) {
         if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {

@@ -1721,7 +1723,9 @@ void memory_region_del_eventfd(MemoryRegion *mr,
     };
     unsigned i;
 
-    adjust_endianness(mr, &mrfd.data, size);
+    if (size) {
+        adjust_endianness(mr, &mrfd.data, size);
+    }
     memory_region_transaction_begin();
     for (i = 0; i < mr->ioeventfd_nb; ++i) {
         if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
@@ -188,7 +188,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_GET_FEATURES = 1,
     VHOST_USER_SET_FEATURES = 2,
     VHOST_USER_SET_OWNER = 3,
-    VHOST_USER_RESET_DEVICE = 4,
+    VHOST_USER_RESET_OWNER = 4,
     VHOST_USER_SET_MEM_TABLE = 5,
     VHOST_USER_SET_LOG_BASE = 6,
     VHOST_USER_SET_LOG_FD = 7,

@@ -274,7 +274,7 @@ static const char *vubr_request_str[] = {
     [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
     [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
     [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
-    [VHOST_USER_RESET_DEVICE] = "VHOST_USER_RESET_DEVICE",
+    [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
     [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
     [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
     [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",

@@ -921,7 +921,7 @@ vubr_execute_request(VubrDev *dev, VhostUserMsg *vmsg)
         return vubr_set_features_exec(dev, vmsg);
     case VHOST_USER_SET_OWNER:
         return vubr_set_owner_exec(dev, vmsg);
-    case VHOST_USER_RESET_DEVICE:
+    case VHOST_USER_RESET_OWNER:
         return vubr_reset_device_exec(dev, vmsg);
     case VHOST_USER_SET_MEM_TABLE:
         return vubr_set_mem_table_exec(dev, vmsg);
@@ -57,7 +57,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_GET_FEATURES = 1,
     VHOST_USER_SET_FEATURES = 2,
     VHOST_USER_SET_OWNER = 3,
-    VHOST_USER_RESET_DEVICE = 4,
+    VHOST_USER_RESET_OWNER = 4,
     VHOST_USER_SET_MEM_TABLE = 5,
     VHOST_USER_SET_LOG_BASE = 6,
     VHOST_USER_SET_LOG_FD = 7,

@@ -86,6 +86,11 @@ typedef struct VhostUserMemory {
     VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
 } VhostUserMemory;
 
+typedef struct VhostUserLog {
+    uint64_t mmap_size;
+    uint64_t mmap_offset;
+} VhostUserLog;
+
 typedef struct VhostUserMsg {
     VhostUserRequest request;
 
@@ -94,10 +99,13 @@ typedef struct VhostUserMsg {
     uint32_t flags;
     uint32_t size; /* the following payload size */
     union {
+#define VHOST_USER_VRING_IDX_MASK   (0xff)
+#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
         uint64_t u64;
         struct vhost_vring_state state;
         struct vhost_vring_addr addr;
         VhostUserMemory memory;
+        VhostUserLog log;
     } payload;
 } QEMU_PACKED VhostUserMsg;
 
@@ -307,7 +315,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
         g_cond_signal(&s->data_cond);
         break;
 
-    case VHOST_USER_RESET_DEVICE:
+    case VHOST_USER_RESET_OWNER:
         s->fds_num = 0;
         break;
 