diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c
index f610a25d2e..30405b5113 100644
--- a/hw/acpi/pcihp.c
+++ b/hw/acpi/pcihp.c
@@ -222,9 +222,27 @@ static void acpi_pcihp_eject_slot(AcpiPciHpState *s, unsigned bsel, unsigned slo
         PCIDevice *dev = PCI_DEVICE(qdev);
         if (PCI_SLOT(dev->devfn) == slot) {
             if (!acpi_pcihp_pc_no_hotplug(s, dev)) {
-                hotplug_ctrl = qdev_get_hotplug_handler(qdev);
-                hotplug_handler_unplug(hotplug_ctrl, qdev, &error_abort);
-                object_unparent(OBJECT(qdev));
+                /*
+                 * partially_hotplugged is used by virtio-net failover:
+                 * failover has asked the guest OS to unplug the device
+                 * but we need to keep some references to the device
+                 * to be able to plug it back in case of failure so
+                 * we don't execute hotplug_handler_unplug().
+                 */
+                if (dev->partially_hotplugged) {
+                    /*
+                     * pending_deleted_event is set to true when
+                     * virtio-net failover asks to unplug the device,
+                     * and set to false here when the operation is done.
+                     * This is used by the migration loop to detect the
+                     * end of the operation and really start the migration.
+                     */
+                    qdev->pending_deleted_event = false;
+                } else {
+                    hotplug_ctrl = qdev_get_hotplug_handler(qdev);
+                    hotplug_handler_unplug(hotplug_ctrl, qdev, &error_abort);
+                    object_unparent(OBJECT(qdev));
+                }
             }
         }
     }
@@ -396,6 +414,12 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
         return;
     }
 
+    /*
+     * pending_deleted_event is used by virtio-net failover to detect the
+     * end of the unplug operation; the flag is set to false in
+     * acpi_pcihp_eject_slot() when the operation is completed.
+     */
+    pdev->qdev.pending_deleted_event = true;
     s->acpi_pcihp_pci_status[bsel].down |= (1U << slot);
     acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS);
 }
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 294499ee20..f584449d8d 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -3629,6 +3629,12 @@ static void vtd_init(IntelIOMMUState *s)
     vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits,
                                                          x86_iommu->dt_supported);
 
+    if (s->scalable_mode) {
+        vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP;
+        vtd_spte_rsvd_large[2] &= ~VTD_SPTE_SNP;
+        vtd_spte_rsvd_large[3] &= ~VTD_SPTE_SNP;
+    }
+
     if (x86_iommu_ir_supported(x86_iommu)) {
         s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
         if (s->intr_eim == ON_OFF_AUTO_ON) {
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 3d5487fe2c..a6c788049b 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -388,6 +388,8 @@ typedef union VTDInvDesc VTDInvDesc;
 #define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8
 
 /* Rsvd field masks for spte */
+#define VTD_SPTE_SNP 0x800ULL
+
 #define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, dt_supported) \
         dt_supported ? \
         (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \
diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
index c5ed266337..d7d73a31e4 100644
--- a/hw/pci/pcie.c
+++ b/hw/pci/pcie.c
@@ -929,8 +929,8 @@ void pcie_add_capability(PCIDevice *dev,
                          uint16_t offset, uint16_t size)
 {
     assert(offset >= PCI_CONFIG_SPACE_SIZE);
-    assert(offset < offset + size);
-    assert(offset + size <= PCIE_CONFIG_SPACE_SIZE);
+    assert(offset < (uint16_t)(offset + size));
+    assert((uint16_t)(offset + size) <= PCIE_CONFIG_SPACE_SIZE);
     assert(size >= 8);
     assert(pci_is_express(dev));
 
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index c6962fcbfe..9a4f491b54 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -231,7 +231,7 @@ static void balloon_stats_poll_cb(void *opaque)
         return;
     }
 
-    virtqueue_push(s->svq, s->stats_vq_elem, s->stats_vq_offset);
+    virtqueue_push(s->svq, s->stats_vq_elem, 0);
     virtio_notify(vdev, s->svq);
     g_free(s->stats_vq_elem);
     s->stats_vq_elem = NULL;
@@ -438,7 +438,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
             memory_region_unref(section.mr);
         }
 
-        virtqueue_push(vq, elem, offset);
+        virtqueue_push(vq, elem, 0);
         virtio_notify(vdev, vq);
         g_free(elem);
         virtio_balloon_pbp_free(&pbp);
@@ -510,6 +510,7 @@ static bool get_free_page_hints(VirtIOBalloon *dev)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtQueue *vq = dev->free_page_vq;
     bool ret = true;
+    int i;
 
     while (dev->block_iothread) {
         qemu_cond_wait(&dev->free_page_cond, &dev->free_page_lock);
@@ -544,12 +545,14 @@ static bool get_free_page_hints(VirtIOBalloon *dev)
     }
 
     if (elem->in_num && dev->free_page_hint_status == FREE_PAGE_HINT_S_START) {
-        qemu_guest_free_page_hint(elem->in_sg[0].iov_base,
-                                  elem->in_sg[0].iov_len);
+        for (i = 0; i < elem->in_num; i++) {
+            qemu_guest_free_page_hint(elem->in_sg[i].iov_base,
+                                      elem->in_sg[i].iov_len);
+        }
     }
 
 out:
-    virtqueue_push(vq, elem, 1);
+    virtqueue_push(vq, elem, 0);
     g_free(elem);
     return ret;
 }
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 7b3ebca178..72da12fea5 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -817,6 +817,17 @@ static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
     return path;
 }
 
+static void virtio_mmio_vmstate_change(DeviceState *d, bool running)
+{
+    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+
+    if (running) {
+        virtio_mmio_start_ioeventfd(proxy);
+    } else {
+        virtio_mmio_stop_ioeventfd(proxy);
+    }
+}
+
 static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
 {
     BusClass *bus_class = BUS_CLASS(klass);
@@ -832,6 +843,7 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
     k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
     k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
     k->pre_plugged = virtio_mmio_pre_plugged;
+    k->vmstate_change = virtio_mmio_vmstate_change;
     k->has_variable_vring_alignment = true;
     bus_class->max_dev = 1;
     bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 2e3c22a8c7..25dd6dd975 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -170,9 +170,17 @@ static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
     return true;
 }
 
+/** Dummy receive in case qemu falls back to userland tap networking */
+static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
+                                  size_t size)
+{
+    return 0;
+}
+
 static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
+       .receive = vhost_vdpa_receive,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
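
Editor's note, not part of the patch: the hw/pci/pcie.c hunk deserves a word. Both
offset and size are uint16_t, so in "offset < offset + size" the sum is computed
after integer promotion to int and can never wrap; the old overflow check was a
tautology. Casting the sum back to uint16_t, as the patch does, makes a 16-bit
wraparound observable. A standalone sketch with hypothetical values chosen only so
that offset + size exceeds 0xffff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t offset = 0x1000;  /* hypothetical capability offset */
    uint16_t size   = 0xf800;  /* hypothetical size: sum is 0x10800, past 0xffff */

    /* Old check: operands promote to int, 0x1000 < 0x10800 is always true. */
    printf("old check: %d\n", offset < offset + size);              /* prints 1 */

    /* New check: (uint16_t)0x10800 == 0x0800, so 0x1000 < 0x0800 is false. */
    printf("new check: %d\n", offset < (uint16_t)(offset + size));  /* prints 0 */

    return 0;
}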
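
Likewise for the virtio-balloon free-page-hint hunk: a virtqueue element can carry
several scatter-gather segments in its in_sg[] array, and the old code reported only
in_sg[0], silently dropping the rest. A minimal sketch of the pattern, using plain
struct iovec and a hypothetical report() stand-in for qemu_guest_free_page_hint():

#include <stdio.h>
#include <sys/uio.h>

/* Hypothetical stand-in for qemu_guest_free_page_hint(). */
static void report(void *base, size_t len)
{
    printf("hint: %zu bytes at %p\n", len, base);
}

int main(void)
{
    static char a[4096], b[4096];
    struct iovec in_sg[] = {
        { .iov_base = a, .iov_len = sizeof(a) },
        { .iov_base = b, .iov_len = sizeof(b) },
    };
    unsigned in_num = sizeof(in_sg) / sizeof(in_sg[0]);

    /* Old code: report(in_sg[0].iov_base, in_sg[0].iov_len); -- segment b lost. */
    for (unsigned i = 0; i < in_num; i++) {
        report(in_sg[i].iov_base, in_sg[i].iov_len);
    }
    return 0;
}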