block: stop relying on io_flush() in bdrv_drain_all()

If a block driver has no file descriptors to monitor but there are still
active requests, it can return 1 from .io_flush().  This is used to spin
during synchronous I/O.
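
For reference, the spin follows roughly this pattern (an illustrative
sketch of the qemu_aio_wait()/aio_poll() behaviour, not the actual code;
the handler list and field names are simplified):

    static bool aio_wait_iteration(AioContext *ctx)
    {
        AioHandler *node;
        bool busy = false;

        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            /* A driver with in-flight requests but nothing to poll
             * returns nonzero from .io_flush(), so the caller keeps
             * spinning even though no fd will become ready. */
            if (node->io_flush && node->io_flush(node->opaque)) {
                busy = true;
            }
        }

        /* ... poll file descriptors and dispatch ready handlers ... */

        return busy;  /* synchronous callers loop while this is true */
    }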

Stop relying on .io_flush() and instead check
QLIST_EMPTY(&bs->tracked_requests) to decide whether there are active
requests.
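
The tracked_requests list is maintained by the request tracking code in
block.c; in outline it works roughly like this (simplified sketch, the
real helpers carry more state):

    typedef struct BdrvTrackedRequest {
        BlockDriverState *bs;
        int64_t sector_num;
        int nb_sectors;
        bool is_write;
        QLIST_ENTRY(BdrvTrackedRequest) list;
    } BdrvTrackedRequest;

    /* Each coroutine read/write adds itself to bs->tracked_requests for
     * its lifetime, so an empty list means no requests are in flight. */
    static void tracked_request_begin(BdrvTrackedRequest *req,
                                      BlockDriverState *bs,
                                      int64_t sector_num,
                                      int nb_sectors, bool is_write)
    {
        *req = (BdrvTrackedRequest){
            .bs         = bs,
            .sector_num = sector_num,
            .nb_sectors = nb_sectors,
            .is_write   = is_write,
        };
        QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    }

    static void tracked_request_end(BdrvTrackedRequest *req)
    {
        QLIST_REMOVE(req, list);
    }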

This is the first step in removing .io_flush() so that event loops no
longer need to have the concept of synchronous I/O.  Eventually we may
be able to kill synchronous I/O completely by running everything in a
coroutine, but that is future work.

Note this patch moves bs->throttled_reqs initialization to bdrv_new() so
that bdrv_requests_pending(bs) can safely access it.  In practice bs is
allocated with g_malloc0(), so the memory is already zeroed, but it's
safer to initialize the queue explicitly.
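
Concretely, the following ordering must be safe even though I/O
throttling was never enabled on the device (hypothetical caller, shown
only to illustrate why the queue is now initialized in bdrv_new()):

    BlockDriverState *bs = bdrv_new("drive0");

    /* bdrv_io_limits_enable() has not been called, but bdrv_drain_all()
     * may run at any time and now checks
     * qemu_co_queue_empty(&bs->throttled_reqs), so throttled_reqs must
     * already be a valid, empty queue. */
    bdrv_drain_all();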

We also need to fix up block/stream.c:close_unused_images() so that the
backing file chain never contains a dangling pointer while it is being
rearranged.  This matters now because the new bdrv_drain_all() traverses
the backing file chain.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Author: Stefan Hajnoczi <stefanha@redhat.com>
Date:   2013-04-11 15:41:13 +02:00
Parent: e1b5c52e04
Commit: 88266f5aa7

2 changed files with 40 additions and 11 deletions

block.c

@@ -148,7 +148,6 @@ static void bdrv_block_timer(void *opaque)
 void bdrv_io_limits_enable(BlockDriverState *bs)
 {
-    qemu_co_queue_init(&bs->throttled_reqs);
     bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
     bs->io_limits_enabled = true;
 }
@@ -306,6 +305,7 @@ BlockDriverState *bdrv_new(const char *device_name)
     bdrv_iostatus_disable(bs);
     notifier_list_init(&bs->close_notifiers);
     notifier_with_return_list_init(&bs->before_write_notifiers);
+    qemu_co_queue_init(&bs->throttled_reqs);
 
     return bs;
 }
@@ -1428,6 +1428,35 @@ void bdrv_close_all(void)
     }
 }
 
+/* Check if any requests are in-flight (including throttled requests) */
+static bool bdrv_requests_pending(BlockDriverState *bs)
+{
+    if (!QLIST_EMPTY(&bs->tracked_requests)) {
+        return true;
+    }
+    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
+        return true;
+    }
+    if (bs->file && bdrv_requests_pending(bs->file)) {
+        return true;
+    }
+    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
+        return true;
+    }
+    return false;
+}
+
+static bool bdrv_requests_pending_all(void)
+{
+    BlockDriverState *bs;
+    QTAILQ_FOREACH(bs, &bdrv_states, list) {
+        if (bdrv_requests_pending(bs)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 /*
  * Wait for pending requests to complete across all BlockDriverStates
  *
@@ -1442,12 +1471,11 @@ void bdrv_close_all(void)
  */
 void bdrv_drain_all(void)
 {
+    /* Always run first iteration so any pending completion BHs run */
+    bool busy = true;
     BlockDriverState *bs;
-    bool busy;
 
-    do {
-        busy = qemu_aio_wait();
-
+    while (busy) {
         /* FIXME: We do not have timer support here, so this is effectively
          * a busy wait.
          */
@@ -1456,12 +1484,9 @@ void bdrv_close_all(void)
                 busy = true;
             }
         }
-    } while (busy);
-
-    /* If requests are still pending there is a bug somewhere */
-    QTAILQ_FOREACH(bs, &bdrv_states, list) {
-        assert(QLIST_EMPTY(&bs->tracked_requests));
-        assert(qemu_co_queue_empty(&bs->throttled_reqs));
+
+        busy = bdrv_requests_pending_all();
+        busy |= aio_poll(qemu_get_aio_context(), busy);
     }
 }

block/stream.c

@@ -57,6 +57,11 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
     BlockDriverState *intermediate;
     intermediate = top->backing_hd;
 
+    /* Must assign before bdrv_delete() to prevent traversing dangling pointer
+     * while we delete backing image instances.
+     */
+    top->backing_hd = base;
+
     while (intermediate) {
         BlockDriverState *unused;
@@ -70,7 +75,6 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
         unused->backing_hd = NULL;
         bdrv_delete(unused);
     }
-    top->backing_hd = base;
 }
 
 static void coroutine_fn stream_run(void *opaque)