mirror of https://github.com/xemu-project/xemu.git
scsi: Await request purging
scsi_device_for_each_req_async() currently does not provide any way to
be awaited. One of its callers is scsi_device_purge_requests(), which
therefore currently does not guarantee that all requests are fully
settled when it returns.

We want all requests to be settled, because scsi_device_purge_requests()
is called through the unrealize path, including the one invoked by
virtio_scsi_hotunplug() through qdev_simple_device_unplug_cb(), which
most likely assumes that all SCSI requests are done then.

In fact, scsi_device_purge_requests() already contains a blk_drain(),
but this will not fully await scsi_device_for_each_req_async(), only
the I/O requests it potentially cancels (not the non-I/O requests).
However, we can have scsi_device_for_each_req_async() increment the BB
in-flight counter, and have scsi_device_for_each_req_async_bh()
decrement it when it is done. This way, the blk_drain() will fully
await all SCSI requests to be purged.

This also removes the need for scsi_device_for_each_req_async_bh() to
double-check the current context and potentially re-schedule itself,
should it now differ from the BB's context: Changing a BB's AioContext
with a root node is done through bdrv_try_change_aio_context(), which
creates a drained section. With this patch, we keep the BB in-flight
counter elevated throughout, so we know the BB's context cannot change.

Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
Message-ID: <20240202144755.671354-3-hreitz@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent ad89367202
commit 1604c04931
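The fix described in the commit message follows a generic pairing discipline: increment an in-flight counter when deferred work is scheduled, decrement it when that work has completed, and have the drain operation block until the counter reaches zero. Below is a minimal, self-contained C sketch of that discipline; everything in it (schedule_bh(), run_one_bh(), drain(), the FakeBH queue) is an illustrative stand-in, not QEMU API. In the actual patch that follows, the real blk_inc_in_flight(), blk_dec_in_flight(), blk_drain(), and aio_bh_schedule_oneshot() play these roles.

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for a scheduled bottom half (BH). */
typedef void (*BHFunc)(void *opaque);

typedef struct {
    BHFunc fn;
    void *opaque;
} FakeBH;

static FakeBH pending[16];
static int n_pending;
static int in_flight;   /* models the BlockBackend's in-flight counter */

/* Stand-in for aio_bh_schedule_oneshot() plus blk_inc_in_flight(). */
static void schedule_bh(BHFunc fn, void *opaque)
{
    in_flight++;        /* paired with the decrement in run_one_bh() */
    pending[n_pending++] = (FakeBH){ fn, opaque };
}

/* Run one pending BH; the decrement happens only after fn() is done. */
static void run_one_bh(void)
{
    assert(n_pending > 0);
    FakeBH bh = pending[--n_pending];
    bh.fn(bh.opaque);
    in_flight--;        /* paired with the increment in schedule_bh() */
}

/* Stand-in for blk_drain(): settle everything that is in flight. */
static void drain(void)
{
    while (in_flight > 0) {
        run_one_bh();
    }
}

static void purge_one(void *opaque)
{
    printf("purging request %s\n", (const char *)opaque);
}

int main(void)
{
    schedule_bh(purge_one, "req0");
    schedule_bh(purge_one, "req1");

    drain();            /* cannot return before both purges have run */
    assert(in_flight == 0);
    return 0;
}

Because the counter is raised before the work is even queued and lowered only after the callback has run, drain() observing zero implies that every scheduled callback has fully completed. That is exactly why the blk_drain() in scsi_device_purge_requests() below now awaits all purges.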
@@ -120,17 +120,13 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
     SCSIRequest *next;
 
     /*
-     * If the AioContext changed before this BH was called then reschedule into
-     * the new AioContext before accessing ->requests. This can happen when
-     * scsi_device_for_each_req_async() is called and then the AioContext is
-     * changed before BHs are run.
+     * The BB cannot have changed contexts between this BH being scheduled and
+     * now: BBs' AioContexts, when they have a node attached, can only be
+     * changed via bdrv_try_change_aio_context(), in a drained section. While
+     * we have the in-flight counter incremented, that drain must block.
      */
     ctx = blk_get_aio_context(s->conf.blk);
-    if (ctx != qemu_get_current_aio_context()) {
-        aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh,
-                                g_steal_pointer(&data));
-        return;
-    }
+    assert(ctx == qemu_get_current_aio_context());
 
     QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
         data->fn(req, data->fn_opaque);
@@ -138,11 +134,16 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
 
     /* Drop the reference taken by scsi_device_for_each_req_async() */
     object_unref(OBJECT(s));
+
+    /* Paired with blk_inc_in_flight() in scsi_device_for_each_req_async() */
+    blk_dec_in_flight(s->conf.blk);
 }
 
 /*
  * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
  * runs in the AioContext that is executing the request.
+ * Keeps the BlockBackend's in-flight counter incremented until everything is
+ * done, so draining it will settle all scheduled @fn() calls.
  */
 static void scsi_device_for_each_req_async(SCSIDevice *s,
                                            void (*fn)(SCSIRequest *, void *),
@@ -163,6 +164,8 @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
      */
     object_ref(OBJECT(s));
 
+    /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
+    blk_inc_in_flight(s->conf.blk);
     aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
                             scsi_device_for_each_req_async_bh,
                             data);
@@ -1728,11 +1731,20 @@ static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
     scsi_req_cancel_async(req, NULL);
 }
 
+/**
+ * Cancel all requests, and block until they are deleted.
+ */
 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
 {
     scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);
 
+    /*
+     * Await all the scsi_device_purge_one_req() calls scheduled by
+     * scsi_device_for_each_req_async(), and all I/O requests that were
+     * cancelled this way, but may still take a bit of time to settle.
+     */
     blk_drain(sdev->conf.blk);
+
     scsi_device_set_ua(sdev, sense);
 }