mirror of https://github.com/xemu-project/xemu.git
virtio: fixes

fixes all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

Merge tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmK6NQoPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRp/sQIAJGiYliUHElJapM/4KSsXKWCFtk9B8wJuUie
# yeMKOdD6QSk9tk/HkYSCnMB7G6Fe+MtoE+sPm/6l5nOFqvqVVJw9vOKteWHSpQ0E
# 9CgbR7s7K1MoLG9J613iB2OtAfhPrWEvSOJ6mvTAxGgxhCQw6UzC88cYfBHJ/efn
# GAhVlriSfSCRANmivjY+g4h4JFWWSMTH6m9u4wKBJF8GRkNgN+C50Z+bp8aE7wRT
# KiMoaaYUDOjxzD+8nGYggg/t+UIM7jG2t8M5BMbC0NMP+ovVZeesWK6ZOzoda2tI
# ZONV0dLikLCicyOvfMH6YDqzGtchCDmS0hpfuorhlzsntm42RBM=
# =T+gr
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 28 Jun 2022 04:24:02 AM +0530
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  include/hw/virtio: document vhost_ack_features
  include/hw/virtio: document vhost_get_features
  contrib/vhost-user-blk: fix 32 bit build and enable
  MAINTAINERS: Collect memory device files in "Memory devices"
  libvhost-user: Fix VHOST_USER_ADD_MEM_REG reply
  libvhost-user: Fix VHOST_USER_GET_MAX_MEM_SLOTS reply
  docs/vhost-user: Fix mismerge
  virtio-iommu: Fix migration regression
  vhost: setup error eventfd and dump errors
  vhost: add method vhost_set_vring_err
  msi: fix MSI vector limit check in msi_set_mask()
  virtio-iommu: Fix the partial copy of probe request

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
commit 2a8835cb45

 MAINTAINERS | 25
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -1840,7 +1840,6 @@ R: Ani Sinha <ani@anisinha.ca>
 S: Supported
 F: include/hw/acpi/*
 F: include/hw/firmware/smbios.h
-F: hw/mem/*
 F: hw/acpi/*
 F: hw/smbios/*
 F: hw/i386/acpi-build.[hc]
@@ -1851,6 +1850,7 @@ F: tests/qtest/acpi-utils.[hc]
 F: tests/data/acpi/
 F: docs/specs/acpi_cpu_hotplug.rst
 F: docs/specs/acpi_mem_hotplug.rst
+F: docs/specs/acpi_nvdimm.rst
 F: docs/specs/acpi_pci_hotplug.rst
 F: docs/specs/acpi_hw_reduced_hotplug.rst
 
@@ -2158,15 +2158,6 @@ F: qapi/rocker.json
 F: tests/rocker/
 F: docs/specs/rocker.txt
 
-NVDIMM
-M: Xiao Guangrong <xiaoguangrong.eric@gmail.com>
-S: Maintained
-F: hw/acpi/nvdimm.c
-F: hw/mem/nvdimm.c
-F: include/hw/mem/nvdimm.h
-F: docs/nvdimm.txt
-F: docs/specs/acpi_nvdimm.rst
-
 e1000x
 M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
 S: Maintained
@@ -2588,6 +2579,7 @@ M: Ben Widawsky <ben.widawsky@intel.com>
 M: Jonathan Cameron <jonathan.cameron@huawei.com>
 S: Supported
 F: hw/cxl/
+F: hw/mem/cxl_type3.c
 F: include/hw/cxl/
 
 Dirty Bitmaps
@@ -2704,6 +2696,19 @@ F: softmmu/physmem.c
 F: include/exec/memory-internal.h
 F: scripts/coccinelle/memory-region-housekeeping.cocci
 
+Memory devices
+M: David Hildenbrand <david@redhat.com>
+M: Igor Mammedov <imammedo@redhat.com>
+R: Xiao Guangrong <xiaoguangrong.eric@gmail.com>
+S: Supported
+F: hw/mem/memory-device.c
+F: hw/mem/nvdimm.c
+F: hw/mem/pc-dimm.c
+F: include/hw/mem/memory-device.h
+F: include/hw/mem/nvdimm.h
+F: include/hw/mem/pc-dimm.h
+F: docs/nvdimm.txt
+
 SPICE
 M: Gerd Hoffmann <kraxel@redhat.com>
 S: Odd Fixes
diff --git a/contrib/vhost-user-blk/meson.build b/contrib/vhost-user-blk/meson.build
@@ -1,5 +1,4 @@
-# FIXME: broken on 32-bit architectures
 executable('vhost-user-blk', files('vhost-user-blk.c'),
            dependencies: [qemuutil, vhost_user],
-           build_by_default: false,
+           build_by_default: targetos == 'linux',
            install: false)
diff --git a/contrib/vhost-user-blk/vhost-user-blk.c b/contrib/vhost-user-blk/vhost-user-blk.c
@@ -146,7 +146,7 @@ vub_readv(VubReq *req, struct iovec *iov, uint32_t iovcnt)
     req->size = vub_iov_size(iov, iovcnt);
     rc = preadv(vdev_blk->blk_fd, iov, iovcnt, req->sector_num * 512);
     if (rc < 0) {
-        fprintf(stderr, "%s, Sector %"PRIu64", Size %lu failed with %s\n",
+        fprintf(stderr, "%s, Sector %"PRIu64", Size %zu failed with %s\n",
                 vdev_blk->blk_name, req->sector_num, req->size,
                 strerror(errno));
         return -1;
@@ -169,7 +169,7 @@ vub_writev(VubReq *req, struct iovec *iov, uint32_t iovcnt)
     req->size = vub_iov_size(iov, iovcnt);
     rc = pwritev(vdev_blk->blk_fd, iov, iovcnt, req->sector_num * 512);
     if (rc < 0) {
-        fprintf(stderr, "%s, Sector %"PRIu64", Size %lu failed with %s\n",
+        fprintf(stderr, "%s, Sector %"PRIu64", Size %zu failed with %s\n",
                 vdev_blk->blk_name, req->sector_num, req->size,
                 strerror(errno));
         return -1;
@@ -188,7 +188,7 @@ vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt,
 
     size = vub_iov_size(iov, iovcnt);
     if (size != sizeof(*desc)) {
-        fprintf(stderr, "Invalid size %ld, expect %ld\n", size, sizeof(*desc));
+        fprintf(stderr, "Invalid size %zd, expect %zd\n", size, sizeof(*desc));
        return -1;
     }
     buf = g_new0(char, size);
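The three hunks above fix format strings for `size_t` values: `%lu` expects an `unsigned long`, which does not match `size_t` on every ABI (notably typical 32-bit targets), breaking the 32-bit build under -Wformat. A minimal standalone illustration, not QEMU code:

    /* %zu is the portable length modifier for size_t; %lu only happens
     * to work on LP64 platforms where size_t == unsigned long. */
    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t size = sizeof(long double);

        printf("Size %zu\n", size);   /* always correct for size_t */
        /* printf("Size %lu\n", size);   type mismatch off LP64 */
        return 0;
    }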
diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst
@@ -1376,14 +1376,6 @@ Front-end message types
   For further details on postcopy, see ``VHOST_USER_SET_MEM_TABLE``.
   They apply to ``VHOST_USER_ADD_MEM_REG`` accordingly.
 
-  Exactly one file descriptor from which the memory is mapped is
-  passed in the ancillary data.
-
-  In postcopy mode (see ``VHOST_USER_POSTCOPY_LISTEN``), the back-end
-  replies with the bases of the memory mapped region to the front-end.
-  For further details on postcopy, see ``VHOST_USER_SET_MEM_TABLE``.
-  They apply to ``VHOST_USER_ADD_MEM_REG`` accordingly.
-
 ``VHOST_USER_REM_MEM_REG``
   :id: 38
   :equivalent ioctl: N/A
@@ -1408,14 +1400,6 @@ Front-end message types
   accept messages with one file descriptor. If a file descriptor is
   passed, the back-end MUST close it without using it otherwise.
 
-  The memory region to be removed is identified by its guest address,
-  user address and size. The mmap offset is ignored.
-
-  No file descriptors SHOULD be passed in the ancillary data. For
-  compatibility with existing incorrect implementations, the back-end MAY
-  accept messages with one file descriptor. If a file descriptor is
-  passed, the back-end MUST close it without using it otherwise.
-
 ``VHOST_USER_SET_STATUS``
   :id: 39
   :equivalent ioctl: VHOST_VDPA_SET_STATUS
diff --git a/hw/pci/msi.c b/hw/pci/msi.c
@@ -322,9 +322,9 @@ void msi_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp)
     bool msi64bit = flags & PCI_MSI_FLAGS_64BIT;
     uint32_t irq_state, vector_mask, pending;
 
-    if (vector > PCI_MSI_VECTORS_MAX) {
+    if (vector >= PCI_MSI_VECTORS_MAX) {
         error_setg(errp, "msi: vector %d not allocated. max vector is %d",
-                   vector, PCI_MSI_VECTORS_MAX);
+                   vector, (PCI_MSI_VECTORS_MAX - 1));
         return;
     }
 
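This is a classic off-by-one: vectors are numbered 0 to PCI_MSI_VECTORS_MAX - 1 (PCI_MSI_VECTORS_MAX is 32 in QEMU, matching PCI's 32-vector MSI limit), so the old `>` test let `vector == PCI_MSI_VECTORS_MAX` slip past the bounds check. A standalone demonstration:

    #include <stdio.h>

    #define PCI_MSI_VECTORS_MAX 32   /* valid vectors: 0..31 */

    int main(void)
    {
        int vector = PCI_MSI_VECTORS_MAX;   /* one past the last valid vector */

        printf("old check rejects: %d\n", vector > PCI_MSI_VECTORS_MAX);  /* 0: accepted! */
        printf("new check rejects: %d\n", vector >= PCI_MSI_VECTORS_MAX); /* 1: rejected */
        return 0;
    }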
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
@@ -146,6 +146,12 @@ static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
     return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
 }
 
+static int vhost_kernel_set_vring_err(struct vhost_dev *dev,
+                                      struct vhost_vring_file *file)
+{
+    return vhost_kernel_call(dev, VHOST_SET_VRING_ERR, file);
+}
+
 static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                    struct vhost_vring_state *s)
 {
@@ -309,6 +315,7 @@ const VhostOps kernel_ops = {
         .vhost_get_vring_base = vhost_kernel_get_vring_base,
         .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
         .vhost_set_vring_call = vhost_kernel_set_vring_call,
+        .vhost_set_vring_err = vhost_kernel_set_vring_err,
         .vhost_set_vring_busyloop_timeout =
                                 vhost_kernel_set_vring_busyloop_timeout,
         .vhost_set_features = vhost_kernel_set_features,
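For the kernel backend, vhost_kernel_call is a thin wrapper around ioctl(2) on the vhost device fd, so the new op boils down to the sketch below (simplified; the real code resolves the fd from dev->opaque). VHOST_SET_VRING_ERR hands the kernel an eventfd to signal on vring errors; fd = -1 clears it.

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Simplified sketch of what the new kernel_ops entry performs. */
    static int set_vring_err_sketch(int vhost_fd, unsigned int index, int errfd)
    {
        struct vhost_vring_file file = { .index = index, .fd = errfd };

        return ioctl(vhost_fd, VHOST_SET_VRING_ERR, &file);
    }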
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
@@ -1313,6 +1313,11 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
     return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
 }
 
+static int vhost_user_set_vring_err(struct vhost_dev *dev,
+                                    struct vhost_vring_file *file)
+{
+    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
+}
+
 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
 {
@@ -2616,6 +2621,7 @@ const VhostOps user_ops = {
         .vhost_get_vring_base = vhost_user_get_vring_base,
         .vhost_set_vring_kick = vhost_user_set_vring_kick,
         .vhost_set_vring_call = vhost_user_set_vring_call,
+        .vhost_set_vring_err = vhost_user_set_vring_err,
         .vhost_set_features = vhost_user_set_features,
         .vhost_get_features = vhost_user_get_features,
         .vhost_set_owner = vhost_user_set_owner,
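In the vhost-user case the eventfd cannot travel in the message payload: vhost_set_vring_file sends a VHOST_USER_SET_VRING_ERR message over the UNIX socket with the fd attached as SCM_RIGHTS ancillary data. A self-contained sketch of that fd-passing mechanism (not the QEMU helper itself):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Send `payload` plus one file descriptor over a unix-domain socket. */
    static int send_fd(int sock, const void *payload, size_t len, int fd)
    {
        struct iovec iov = { .iov_base = (void *)payload, .iov_len = len };
        char control[CMSG_SPACE(sizeof(int))] = { 0 };
        struct msghdr msg = {
            .msg_iov = &iov,
            .msg_iovlen = 1,
            .msg_control = control,
            .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;          /* fd rides in ancillary data */
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
    }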
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
@@ -1278,6 +1278,19 @@ static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
     return 0;
 }
 
+static void vhost_virtqueue_error_notifier(EventNotifier *n)
+{
+    struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
+                                              error_notifier);
+    struct vhost_dev *dev = vq->dev;
+    int index = vq - dev->vqs;
+
+    if (event_notifier_test_and_clear(n) && dev->vdev) {
+        VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
+                        dev->vq_index + index);
+    }
+}
+
 static int vhost_virtqueue_init(struct vhost_dev *dev,
                                 struct vhost_virtqueue *vq, int n)
 {
@@ -1299,7 +1312,27 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     vq->dev = dev;
 
+    if (dev->vhost_ops->vhost_set_vring_err) {
+        r = event_notifier_init(&vq->error_notifier, 0);
+        if (r < 0) {
+            goto fail_call;
+        }
+
+        file.fd = event_notifier_get_fd(&vq->error_notifier);
+        r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
+        if (r) {
+            VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed");
+            goto fail_err;
+        }
+
+        event_notifier_set_handler(&vq->error_notifier,
+                                   vhost_virtqueue_error_notifier);
+    }
+
     return 0;
 
+fail_err:
+    event_notifier_cleanup(&vq->error_notifier);
 fail_call:
     event_notifier_cleanup(&vq->masked_notifier);
     return r;
@@ -1308,6 +1341,10 @@ fail_call:
 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
 {
     event_notifier_cleanup(&vq->masked_notifier);
+    if (vq->dev->vhost_ops->vhost_set_vring_err) {
+        event_notifier_set_handler(&vq->error_notifier, NULL);
+        event_notifier_cleanup(&vq->error_notifier);
+    }
 }
 
 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
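On Linux, QEMU's EventNotifier is backed by an eventfd, and event_notifier_test_and_clear() is essentially a read() that drains the counter. A self-contained sketch of the signalling path the new handler consumes, with the backend side faked by a plain write():

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
        int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        uint64_t v = 1;

        /* the backend signals a vring error on the fd it was given ... */
        if (write(efd, &v, sizeof(v)) != sizeof(v)) {
            return 1;
        }

        /* ... and the handler test-and-clears it, then logs the event */
        if (read(efd, &v, sizeof(v)) == sizeof(v)) {
            fprintf(stderr, "vhost vring error in virtqueue %d\n", 0);
        }
        close(efd);
        return 0;
    }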
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
@@ -675,11 +675,10 @@ static int virtio_iommu_probe(VirtIOIOMMU *s,
 
 static int virtio_iommu_iov_to_req(struct iovec *iov,
                                    unsigned int iov_cnt,
-                                   void *req, size_t req_sz)
+                                   void *req, size_t payload_sz)
 {
-    size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail);
+    size_t sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
 
-    sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
     if (unlikely(sz != payload_sz)) {
         return VIRTIO_IOMMU_S_INVAL;
     }
@@ -692,7 +691,8 @@ static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s, \
                                          unsigned int iov_cnt)          \
 {                                                                       \
     struct virtio_iommu_req_ ## __req req;                              \
-    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
+    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req,               \
+                sizeof(req) - sizeof(struct virtio_iommu_req_tail));    \
                                                                         \
     return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
 }
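The idea behind both hunks: every virtio-iommu request ends in a device-written virtio_iommu_req_tail, so only the driver-written payload may be copied from the guest's iov, and the caller, who knows the concrete request layout, now computes that size. A toy illustration with hypothetical struct names:

    #include <stdio.h>

    struct tail_t { unsigned char status; unsigned char reserved[3]; };
    struct req_t  { unsigned int head; unsigned int flags; struct tail_t tail; };

    int main(void)
    {
        /* copy length excludes the tail the device will fill in itself */
        size_t payload_sz = sizeof(struct req_t) - sizeof(struct tail_t);

        printf("request %zu bytes, copy only %zu payload bytes\n",
               sizeof(struct req_t), payload_sz);
        return 0;
    }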
@@ -1322,6 +1322,14 @@ static int iommu_post_load(void *opaque, int version_id)
     VirtIOIOMMU *s = opaque;
 
     g_tree_foreach(s->domains, reconstruct_endpoints, s);
+
+    /*
+     * Memory regions are dynamically turned on/off depending on
+     * 'config.bypass' and attached domain type if there is. After
+     * migration, we need to make sure the memory regions are
+     * still correct.
+     */
+    virtio_iommu_switch_address_space_all(s);
     return 0;
 }
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
@@ -69,6 +69,8 @@ typedef int (*vhost_set_vring_kick_op)(struct vhost_dev *dev,
                                        struct vhost_vring_file *file);
 typedef int (*vhost_set_vring_call_op)(struct vhost_dev *dev,
                                        struct vhost_vring_file *file);
+typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev,
+                                      struct vhost_vring_file *file);
 typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev,
                                                    struct vhost_vring_state *r);
 typedef int (*vhost_set_features_op)(struct vhost_dev *dev,
@@ -145,6 +147,7 @@ typedef struct VhostOps {
     vhost_get_vring_base_op vhost_get_vring_base;
     vhost_set_vring_kick_op vhost_set_vring_kick;
     vhost_set_vring_call_op vhost_set_vring_call;
+    vhost_set_vring_err_op vhost_set_vring_err;
     vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout;
     vhost_set_features_op vhost_set_features;
     vhost_get_features_op vhost_get_features;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
@@ -29,6 +29,7 @@ struct vhost_virtqueue {
     unsigned long long used_phys;
     unsigned used_size;
     EventNotifier masked_notifier;
+    EventNotifier error_notifier;
     struct vhost_dev *dev;
 };
 
@@ -246,8 +247,29 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);
  */
 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                           bool mask);
+
+/**
+ * vhost_get_features() - return a sanitised set of feature bits
+ * @hdev: common vhost_dev structure
+ * @feature_bits: pointer to terminated table of feature bits
+ * @features: original feature set
+ *
+ * This returns a set of feature bits that is an intersection of what
+ * is supported by the vhost backend (hdev->features), the supported
+ * feature_bits and the requested feature set.
+ */
 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                             uint64_t features);
+
+/**
+ * vhost_ack_features() - set vhost acked_features
+ * @hdev: common vhost_dev structure
+ * @feature_bits: pointer to terminated table of feature bits
+ * @features: requested feature set
+ *
+ * This sets the internal hdev->acked_features to the intersection of
+ * the backend's advertised features and the supported feature_bits.
+ */
 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                         uint64_t features);
 bool vhost_has_free_slot(void);
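A sketch of how callers typically use this pair inside the QEMU tree. The device wrapper names below are hypothetical; the table terminator VHOST_INVALID_FEATURE_BIT and the helper signatures are real.

    /* Hypothetical device code; compiles only inside the QEMU tree. */
    static const int my_feature_bits[] = {
        VIRTIO_F_VERSION_1,
        VIRTIO_RING_F_INDIRECT_DESC,
        VIRTIO_RING_F_EVENT_IDX,
        VHOST_INVALID_FEATURE_BIT    /* terminates the table */
    };

    static uint64_t my_get_features(struct vhost_dev *hdev, uint64_t requested)
    {
        /* intersect backend support, the table, and the requested set */
        return vhost_get_features(hdev, my_feature_bits, requested);
    }

    static void my_set_features(struct vhost_dev *hdev, uint64_t guest_acked)
    {
        /* record the guest-acked subset in hdev->acked_features */
        vhost_ack_features(hdev, my_feature_bits, guest_acked);
    }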
diff --git a/meson.build b/meson.build
@@ -1516,7 +1516,7 @@ have_vhost_user_blk_server = get_option('vhost_user_blk_server') \
            error_message: 'vhost_user_blk_server requires linux') \
   .require(have_vhost_user,
            error_message: 'vhost_user_blk_server requires vhost-user support') \
-  .disable_auto_if(not have_system) \
+  .disable_auto_if(not have_tools and not have_system) \
   .allowed()
 
 if get_option('fuse').disabled() and get_option('fuse_lseek').enabled()
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
@@ -779,15 +779,9 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
 
         /* Send the message back to qemu with the addresses filled in. */
         vmsg->fd_num = 0;
-        if (!vu_send_reply(dev, dev->sock, vmsg)) {
-            vu_panic(dev, "failed to respond to add-mem-region for postcopy");
-            return false;
-        }
-
         DPRINT("Successfully added new region in postcopy\n");
         dev->nregions++;
-        return false;
-
+        return true;
     } else {
         for (i = 0; i < dev->max_queues; i++) {
             if (dev->vq[i].vring.desc) {
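The bug class here: in libvhost-user, a message handler's boolean return tells the generic dispatch loop whether to send vmsg back as the reply. Sending the reply by hand and returning false bypassed that central reply logic. A simplified sketch of the convention (from memory of the library's dispatch loop, not the exact code):

    /* Handlers mutate vmsg in place and return true iff the caller should
     * send it back, so the reply is produced in exactly one place. */
    static bool process_message_sketch(VuDev *dev, VhostUserMsg *vmsg,
                                       bool (*handler)(VuDev *, VhostUserMsg *))
    {
        if (handler(dev, vmsg)) {
            return vu_send_reply(dev, dev->sock, vmsg);
        }
        return true;
    }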
@@ -1827,18 +1821,11 @@ vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
 
 static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
 {
-    vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
-    vmsg->size  = sizeof(vmsg->payload.u64);
-    vmsg->payload.u64 = VHOST_USER_MAX_RAM_SLOTS;
-    vmsg->fd_num = 0;
-
-    if (!vu_message_write(dev, dev->sock, vmsg)) {
-        vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno));
-    }
+    vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS);
 
     DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);
 
-    return false;
+    return true;
 }
 
 static bool
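Same pattern as the previous hunk: the handler stops writing to the socket itself and instead packs the reply into vmsg and returns true. The removed lines are exactly what the existing vmsg_set_reply_u64() helper does, roughly:

    /* Roughly the body of libvhost-user's vmsg_set_reply_u64() helper,
     * reconstructed from the removed lines above; treat as a sketch. */
    static void vmsg_set_reply_u64_sketch(VhostUserMsg *vmsg, uint64_t val)
    {
        vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
        vmsg->size = sizeof(vmsg->payload.u64);
        vmsg->payload.u64 = val;
        vmsg->fd_num = 0;
    }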