vhost: Check for queue full at vhost_svq_add

The series needs to expose vhost_svq_add with full functionality,
including the check for a full queue.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Eugenio Pérez 2022-07-20 08:59:32 +02:00 committed by Jason Wang
parent 98b5adef84
commit f20b70eb5a
1 changed file with 32 additions and 25 deletions
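
The patch below turns vhost_svq_add's boolean result into an error code. A minimal caller-side sketch of the resulting contract (the surrounding caller code is illustrative; only the return values and ownership rules come from the patch):

    int r = vhost_svq_add(svq, elem);
    if (r == 0) {
        /* Success: the SVQ owns elem until the device marks it used. */
    } else if (r == -ENOSPC) {
        /* Queue full: elem was not freed; park it and retry later. */
        svq->next_guest_avail_elem = elem;
    } else {
        /* -EINVAL: elem was invalid; vhost_svq_add() already freed it. */
    }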


@@ -233,21 +233,29 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
  * Add an element to a SVQ.
  *
  * The caller must check that there is enough slots for the new element. It
- * takes ownership of the element: In case of failure, it is free and the SVQ
- * is considered broken.
+ * takes ownership of the element: In case of failure not ENOSPC, it is free.
+ *
+ * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
  */
-static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
+static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
 {
     unsigned qemu_head;
-    bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
+    unsigned ndescs = elem->in_num + elem->out_num;
+    bool ok;
+
+    if (unlikely(ndescs > vhost_svq_available_slots(svq))) {
+        return -ENOSPC;
+    }
+
+    ok = vhost_svq_add_split(svq, elem, &qemu_head);
     if (unlikely(!ok)) {
         g_free(elem);
-        return false;
+        return -EINVAL;
     }
 
     svq->ring_id_maps[qemu_head] = elem;
     vhost_svq_kick(svq);
-    return true;
+    return 0;
 }
 
 /**
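
The new full-queue check calls vhost_svq_available_slots(), which is outside this diff. A plausible sketch of such a helper, assuming the shadow virtqueue tracks a shadow available index and a shadow used index (the field names here are assumptions, not taken from this patch):

    static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
    {
        /* Descriptors made available but not yet used are in flight;
         * the remainder of the ring is free. */
        return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
    }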
@@ -274,7 +282,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
 
         while (true) {
             VirtQueueElement *elem;
-            bool ok;
+            int r;
 
             if (svq->next_guest_avail_elem) {
                 elem = g_steal_pointer(&svq->next_guest_avail_elem);
@@ -286,25 +294,24 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                 break;
             }
 
-            if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
-                /*
-                 * This condition is possible since a contiguous buffer in GPA
-                 * does not imply a contiguous buffer in qemu's VA
-                 * scatter-gather segments. If that happens, the buffer exposed
-                 * to the device needs to be a chain of descriptors at this
-                 * moment.
-                 *
-                 * SVQ cannot hold more available buffers if we are here:
-                 * queue the current guest descriptor and ignore further kicks
-                 * until some elements are used.
-                 */
-                svq->next_guest_avail_elem = elem;
-                return;
-            }
-
-            ok = vhost_svq_add(svq, elem);
-            if (unlikely(!ok)) {
-                /* VQ is broken, just return and ignore any other kicks */
+            r = vhost_svq_add(svq, elem);
+            if (unlikely(r != 0)) {
+                if (r == -ENOSPC) {
+                    /*
+                     * This condition is possible since a contiguous buffer in
+                     * GPA does not imply a contiguous buffer in qemu's VA
+                     * scatter-gather segments. If that happens, the buffer
+                     * exposed to the device needs to be a chain of descriptors
+                     * at this moment.
+                     *
+                     * SVQ cannot hold more available buffers if we are here:
+                     * queue the current guest descriptor and ignore kicks
+                     * until some elements are used.
+                     */
+                    svq->next_guest_avail_elem = elem;
+                }
+
+                /* VQ is full or broken, just return and ignore kicks */
                 return;
             }
         }
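
Read together with the previous hunk, the -ENOSPC path preserves ordering: the element parked in next_guest_avail_elem is picked up before any new guest descriptor on the next pass of the loop. A condensed sketch of that retry path (the virtqueue_pop() branch mirrors the existing loop shown in the hunk above; error handling omitted):

    VirtQueueElement *elem;

    if (svq->next_guest_avail_elem) {
        /* Retry the element that did not fit on the previous kick. */
        elem = g_steal_pointer(&svq->next_guest_avail_elem);
    } else {
        elem = virtqueue_pop(svq->vq, sizeof(*elem));
    }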