mirror of https://github.com/xqemu/xqemu.git
virtio-pci: speedup MSI-X masking and unmasking
This patch tries to speed up MSI-X masking and unmasking through the mapping between vectors and queues. With this patch there's no need to go through all possible virtqueues, which may help to reduce the time spent masking/unmasking a single MSI-X vector when more than hundreds or even thousands of virtqueues are supported. Tested with 80 queue pairs of virtio-net-pci by changing the smp affinity in the background while doing netperf at the same time: Before the patch: 5711.70 Gbits/sec After the patch: 6830.98 Gbits/sec About 19.6% improvement in throughput. Cc: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
e0d686bf4b
commit
851c2a75a6
|
@ -630,28 +630,30 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
|
|||
{
|
||||
VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
|
||||
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
|
||||
int ret, queue_no;
|
||||
VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
|
||||
int ret, index, unmasked = 0;
|
||||
|
||||
for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
|
||||
if (!virtio_queue_get_num(vdev, queue_no)) {
|
||||
while (vq) {
|
||||
index = virtio_get_queue_index(vq);
|
||||
if (!virtio_queue_get_num(vdev, index)) {
|
||||
break;
|
||||
}
|
||||
if (virtio_queue_vector(vdev, queue_no) != vector) {
|
||||
continue;
|
||||
}
|
||||
ret = virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
|
||||
ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
|
||||
if (ret < 0) {
|
||||
goto undo;
|
||||
}
|
||||
vq = virtio_vector_next_queue(vq);
|
||||
++unmasked;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
undo:
|
||||
while (--queue_no >= 0) {
|
||||
if (virtio_queue_vector(vdev, queue_no) != vector) {
|
||||
continue;
|
||||
}
|
||||
virtio_pci_vq_vector_mask(proxy, queue_no, vector);
|
||||
vq = virtio_vector_first_queue(vdev, vector);
|
||||
while (vq && --unmasked >= 0) {
|
||||
index = virtio_get_queue_index(vq);
|
||||
virtio_pci_vq_vector_mask(proxy, index, vector);
|
||||
vq = virtio_vector_next_queue(vq);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -660,16 +662,16 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
|
|||
{
|
||||
VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
|
||||
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
|
||||
int queue_no;
|
||||
VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
|
||||
int index;
|
||||
|
||||
for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
|
||||
if (!virtio_queue_get_num(vdev, queue_no)) {
|
||||
while (vq) {
|
||||
index = virtio_get_queue_index(vq);
|
||||
if (!virtio_queue_get_num(vdev, index)) {
|
||||
break;
|
||||
}
|
||||
if (virtio_queue_vector(vdev, queue_no) != vector) {
|
||||
continue;
|
||||
}
|
||||
virtio_pci_vq_vector_mask(proxy, queue_no, vector);
|
||||
virtio_pci_vq_vector_mask(proxy, index, vector);
|
||||
vq = virtio_vector_next_queue(vq);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue