virtio-pci: decouple the single vector from the interrupt process

To reuse the interrupt-processing code for the configure interrupt, the
handling of a single vector needs to be decoupled from the per-queue loop.
Add the new functions kvm_virtio_pci_vector_use_one() and _release_one().
These functions handle a single vector; the whole process is completed by
looping over the vq number.

Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20211104164827.21911-4-lulu@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
Cindy Lu 2021-11-05 00:48:20 +08:00 committed by Michael S. Tsirkin
parent e3480ef81f
commit 316011b8a7
1 changed files with 73 additions and 58 deletions

View File

@ -677,7 +677,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
} }
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
unsigned int queue_no,
unsigned int vector) unsigned int vector)
{ {
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@ -740,30 +739,28 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
return 0; return 0;
} }
/*
 * Set up KVM irqfd routing for the MSI-X vector of one virtqueue.
 *
 * Looks up the queue's guest notifier and vector, registers the vector
 * with the in-kernel MSI routing table, and — when the guest supports
 * notifier masking — installs the irqfd immediately (otherwise the irqfd
 * is installed later, when the vector is unmasked in the frontend).
 *
 * Returns 0 on success. A vector beyond the allocated MSI-X range is not
 * an error: there is simply no route to set up, so 0 is returned.
 * On failure a negative value is returned and any partial setup for this
 * queue is rolled back via the undo path.
 */
static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
{
    int ret;
    unsigned int vector;
    EventNotifier *n;
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return ret;
    }
    /* No MSI-X vector allocated for this queue: nothing to route. */
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return 0;
    }
    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
    if (ret < 0) {
        goto undo;
    }
    /*
     * If guest supports masking, set up irqfd now.
     * Otherwise, delay until unmasked in the frontend.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
        if (ret < 0) {
            kvm_virtio_pci_vq_vector_release(proxy, vector);
            goto undo;
        }
    }

    return 0;

undo:
    /* Roll back whatever was installed for this single queue. */
    vector = virtio_queue_vector(vdev, queue_no);
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return ret;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
        if (ret < 0) {
            return ret;
        }
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    return ret;
}
/*
 * Set up KVM irqfd routing for the MSI-X vectors of all active virtqueues.
 *
 * Iterates over the first nvqs queues, delegating each one to
 * kvm_virtio_pci_vector_use_one().
 *
 * Returns 0 on success, the negative error of the first failing queue
 * otherwise.
 */
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    int ret = 0;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            /*
             * The first uninitialized queue ends the set of active ones.
             * The pre-refactor loop stopped here with `break` (see the
             * left-hand side of the diff); returning -1 would turn this
             * normal condition into a spurious error for devices whose
             * nvqs exceeds the number of configured queues.  Restore the
             * original semantics.
             */
            break;
        }
        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
        if (ret < 0) {
            /*
             * Stop on the first failure: continuing would overwrite ret
             * with a later queue's result and silently mask the error.
             */
            break;
        }
    }
    return ret;
}
/*
 * Tear down KVM irqfd routing for the MSI-X vector of one virtqueue.
 *
 * Mirror of kvm_virtio_pci_vector_use_one(): looks up the queue's
 * notifier and vector, releases the irqfd if one was installed, and drops
 * the vector's reference in the in-kernel routing table.  A queue without
 * a notifier or with a vector outside the allocated MSI-X range needs no
 * cleanup, so those cases return silently.
 */
static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                                              int queue_no)
{
    unsigned int vector;
    EventNotifier *n;
    int ret;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    PCIDevice *dev = &proxy->pci_dev;

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return;
    }
    /*
     * If guest supports masking, clean up irqfd now.
     * Otherwise, it was cleaned when masked in the frontend.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    kvm_virtio_pci_vq_vector_release(proxy, vector);
}
/*
 * Tear down KVM irqfd routing for the MSI-X vectors of all active
 * virtqueues, delegating each queue to
 * kvm_virtio_pci_vector_release_one().
 */
static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        /* Queues are contiguous: the first uninitialized one ends the set. */
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        kvm_virtio_pci_vector_release_one(proxy, queue_no);
    }
}
static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy, static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,