From a1abf40d6be2fc4b40d90ae3b46442f4a671776b Mon Sep 17 00:00:00 2001
From: Gonglei <arei.gonglei@huawei.com>
Date: Sat, 12 Jul 2014 11:43:37 +0800
Subject: [PATCH 1/3] linux-aio: Fix laio resource leak

When virtio-scsi disks that use Linux AIO are hot-plugged, each
laio_init() call raises the kernel's aio_nr through io_setup(); the
current value can be read with
  # cat /proc/sys/fs/aio-nr
  128
Because laio_cleanup() never destroys the context, the slots are not
returned on unplug, and once aio_nr reaches the limit reported by
  # cat /proc/sys/fs/aio-max-nr
  65536
any further hotplug fails: the AIO contexts have leaked.

Fix it by calling io_destroy() in laio_cleanup().
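
A minimal standalone libaio sketch of the io_setup()/io_destroy()
pairing that laio_init() and laio_cleanup() now mirror (illustration
only, not QEMU code; the file name and build line are made up). A
context that is never destroyed keeps its share of fs.aio-nr for the
whole lifetime of the process:

  /* Build with: gcc -o aio-pair aio-pair.c -laio */
  #include <libaio.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      io_context_t ctx;
      int ret;

      memset(&ctx, 0, sizeof(ctx));   /* io_setup() requires a zeroed context */
      ret = io_setup(128, &ctx);      /* raises /proc/sys/fs/aio-nr */
      if (ret < 0) {
          fprintf(stderr, "io_setup: %s\n", strerror(-ret));
          return 1;
      }

      /* ... io_submit()/io_getevents() would go here ... */

      ret = io_destroy(ctx);          /* gives the slots back to the kernel */
      if (ret < 0) {
          fprintf(stderr, "io_destroy: %s\n", strerror(-ret));
          return 1;
      }
      return 0;
  }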

Reported-by: daifulai <daifulai@huawei.com>
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/linux-aio.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index 48673690ac..7ac7e8c99c 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -310,5 +310,10 @@ void laio_cleanup(void *s_)
     struct qemu_laio_state *s = s_;
 
     event_notifier_cleanup(&s->e);
+
+    if (io_destroy(s->ctx) != 0) {
+        fprintf(stderr, "%s: destroy AIO context %p failed\n",
+                        __func__, &s->ctx);
+    }
     g_free(s);
 }

From e926d9b8c52d5ddf413617df4b341a3114642b14 Mon Sep 17 00:00:00 2001
From: Ming Lei <ming.lei@canonical.com>
Date: Sat, 12 Jul 2014 12:08:52 +0800
Subject: [PATCH 2/3] virtio-blk: data-plane: fix save/set .complete_request in
 start

The complete_request callback has to be saved and set in
virtio_blk_data_plane_start(); otherwise the dataplane's requests
will be completed in the QEMU AIO context.
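
For illustration, a standalone sketch of the save-in-start pattern
(all names are made up, not QEMU's; the stop-side restore is only
there to round out the example):

  #include <stdio.h>

  typedef void (*complete_fn)(const char *req);

  static void complete_default(const char *req)   { printf("default:   %s\n", req); }
  static void complete_dataplane(const char *req) { printf("dataplane: %s\n", req); }

  struct device {
      complete_fn complete_request;
  };

  struct dataplane {
      struct device *dev;
      complete_fn saved_complete_request;
      int started;
  };

  static void dataplane_start(struct dataplane *s)
  {
      if (s->started) {
          return;
      }
      /* Save and replace the callback here, at start time, so the
       * default completion path stays in effect until the data plane
       * is actually running. */
      s->saved_complete_request = s->dev->complete_request;
      s->dev->complete_request = complete_dataplane;
      s->started = 1;
  }

  static void dataplane_stop(struct dataplane *s)
  {
      if (!s->started) {
          return;
      }
      s->dev->complete_request = s->saved_complete_request;
      s->started = 0;
  }

  int main(void)
  {
      struct device dev = { .complete_request = complete_default };
      struct dataplane dp = { .dev = &dev };

      dev.complete_request("req0");   /* before start: default path */
      dataplane_start(&dp);
      dev.complete_request("req1");   /* started: dataplane path */
      dataplane_stop(&dp);
      dev.complete_request("req2");   /* stopped: default path again */
      return 0;
  }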

Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/dataplane/virtio-blk.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 227bb15efc..e88862dc50 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -125,7 +125,6 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
                                   Error **errp)
 {
     VirtIOBlockDataPlane *s;
-    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
     Error *local_err = NULL;
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
@@ -178,8 +177,6 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
     bdrv_op_block_all(blk->conf.bs, s->blocker);
 
     *dataplane = s;
-    s->saved_complete_request = vblk->complete_request;
-    vblk->complete_request = complete_request_vring;
 }
 
 /* Context: QEMU global mutex held */
@@ -201,6 +198,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
     VirtQueue *vq;
 
     if (s->started) {
@@ -234,6 +232,9 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
     }
     s->host_notifier = *virtio_queue_get_host_notifier(vq);
 
+    s->saved_complete_request = vblk->complete_request;
+    vblk->complete_request = complete_request_vring;
+
     s->starting = false;
     s->started = true;
     trace_virtio_blk_data_plane_start(s);

From 5b2ffbe4d99843fd8305c573a100047a8c962327 Mon Sep 17 00:00:00 2001
From: Ming Lei <tom.leiming@gmail.com>
Date: Sat, 12 Jul 2014 12:08:53 +0800
Subject: [PATCH 3/3] virtio-blk: dataplane: notify guest as a batch

Requests are now submitted as a batch, so it is natural
to notify the guest as a batch too.

This can cut the number of interrupt notifications to the VM
considerably:

        - in my test, the notification rate dropped by ~13K/sec
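
To make the batching concrete, here is a standalone sketch of the idea
(plain C, not QEMU's BH API; every name below is illustrative):
completions only mark a pending bottom half, and the guest is notified
once when that bottom half runs, much as qemu_bh_schedule() collapses
repeated schedules of s->bh into a single callback invocation:

  #include <stdbool.h>
  #include <stdio.h>

  struct dataplane {
      bool bh_scheduled;      /* stands in for the QEMUBH "scheduled" state */
      unsigned notifications; /* guest interrupts we would have injected */
  };

  static void notify_guest(struct dataplane *s)
  {
      s->notifications++;     /* the real code sets the guest notifier here */
  }

  static void complete_request(struct dataplane *s, int req)
  {
      printf("completed request %d\n", req);
      /* Coalesce: many completions, at most one pending notification. */
      s->bh_scheduled = true;
  }

  static void run_bottom_half(struct dataplane *s)
  {
      if (s->bh_scheduled) {
          s->bh_scheduled = false;
          notify_guest(s);
      }
  }

  int main(void)
  {
      struct dataplane s = { 0 };
      int i;

      for (i = 0; i < 8; i++) {       /* a batch of eight completions ... */
          complete_request(&s, i);
      }
      run_bottom_half(&s);            /* ... ends in one guest notification */

      printf("guest notifications: %u\n", s.notifications);
      return 0;
  }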

Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/dataplane/virtio-blk.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index e88862dc50..d6ba65ca23 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -34,6 +34,7 @@ struct VirtIOBlockDataPlane {
     VirtIODevice *vdev;
     Vring vring;                    /* virtqueue vring */
     EventNotifier *guest_notifier;  /* irq */
+    QEMUBH *bh;                     /* bh for guest notification */
 
     /* Note that these EventNotifiers are assigned by value.  This is
      * fine as long as you do not call event_notifier_cleanup on them
@@ -61,13 +62,28 @@ static void notify_guest(VirtIOBlockDataPlane *s)
     event_notifier_set(s->guest_notifier);
 }
 
+static void notify_guest_bh(void *opaque)
+{
+    VirtIOBlockDataPlane *s = opaque;
+
+    notify_guest(s);
+}
+
 static void complete_request_vring(VirtIOBlockReq *req, unsigned char status)
 {
+    VirtIOBlockDataPlane *s = req->dev->dataplane;
     stb_p(&req->in->status, status);
 
     vring_push(&req->dev->dataplane->vring, &req->elem,
                req->qiov.size + sizeof(*req->in));
-    notify_guest(req->dev->dataplane);
+
+    /* Suppress notification to guest by BH and its scheduled
+     * flag because requests are completed as a batch after io
+     * plug & unplug is introduced, and the BH can still be
+     * executed in dataplane aio context even after it is
+     * stopped, so needn't worry about notification loss with BH.
+     */
+    qemu_bh_schedule(s->bh);
 }
 
 static void handle_notify(EventNotifier *e)
@@ -172,6 +188,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
         s->iothread = &s->internal_iothread_obj;
     }
     s->ctx = iothread_get_aio_context(s->iothread);
+    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
 
     error_setg(&s->blocker, "block device is in use by data plane");
     bdrv_op_block_all(blk->conf.bs, s->blocker);
@@ -190,6 +207,7 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     bdrv_op_unblock_all(s->blk->conf.bs, s->blocker);
     error_free(s->blocker);
     object_unref(OBJECT(s->iothread));
+    qemu_bh_delete(s->bh);
     g_free(s);
 }