block: Add Error to blk_set_aio_context()

Add an Error parameter to blk_set_aio_context() and use
bdrv_child_try_set_aio_context() internally to check whether all
involved nodes can actually support the AioContext switch.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent 45e92a9011
commit 97896a4887
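The new signature turns the AioContext switch into a fallible operation, and the callers updated below each pick an error strategy: virtio-scsi hotplug propagates errp to its own caller, virtio-blk dataplane start reports the error and bails out, the stop paths pass NULL because other users legitimately keeping the BlockBackend in the iothread is acceptable, and Xen and the tests assert success with &error_abort. A minimal caller sketch of the propagate and ignore patterns; MyDevice and its fields are hypothetical, only the blk_set_aio_context() signature and the Error API come from this commit:

    #include "qapi/error.h"           /* Error */
    #include "qemu/main-loop.h"       /* qemu_get_aio_context() */
    #include "sysemu/block-backend.h" /* blk_set_aio_context() */

    /* Hypothetical device state, for illustration only. */
    typedef struct MyDevice {
        BlockBackend *blk;
        AioContext *iothread_ctx;
    } MyDevice;

    /* Start: the switch can fail (some user of an involved node may not
     * support the new context), so set errp and let the caller decide. */
    static int my_device_start(MyDevice *dev, Error **errp)
    {
        return blk_set_aio_context(dev->blk, dev->iothread_ctx, errp);
    }

    /* Stop: moving back to the main loop is best-effort. If other users
     * keep the BlockBackend in the iothread, that's ok, so any error is
     * discarded by passing NULL. */
    static void my_device_stop(MyDevice *dev)
    {
        blk_set_aio_context(dev->blk, qemu_get_aio_context(), NULL);
    }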
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1865,30 +1865,36 @@ static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
     return blk_get_aio_context(blk_acb->blk);
 }
 
-static void blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
-                                   bool update_root_node)
+static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                                  bool update_root_node, Error **errp)
 {
     BlockDriverState *bs = blk_bs(blk);
     ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+    int ret;
 
     if (bs) {
+        if (update_root_node) {
+            ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
+                                                 errp);
+            if (ret < 0) {
+                return ret;
+            }
+        }
         if (tgm->throttle_state) {
             bdrv_drained_begin(bs);
             throttle_group_detach_aio_context(tgm);
             throttle_group_attach_aio_context(tgm, new_context);
             bdrv_drained_end(bs);
         }
-        if (update_root_node) {
-            GSList *ignore = g_slist_prepend(NULL, blk->root);
-            bdrv_set_aio_context_ignore(bs, new_context, &ignore);
-            g_slist_free(ignore);
-        }
     }
+
+    return 0;
 }
 
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                        Error **errp)
 {
-    blk_do_set_aio_context(blk, new_context, true);
+    return blk_do_set_aio_context(blk, new_context, true, errp);
 }
 
 static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
@@ -1915,7 +1921,7 @@ static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                  GSList **ignore)
 {
     BlockBackend *blk = child->opaque;
-    blk_do_set_aio_context(blk, ctx, false);
+    blk_do_set_aio_context(blk, ctx, false, &error_abort);
 }
 
 void blk_add_aio_context_notifier(BlockBackend *blk,
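A note on the &error_abort in blk_root_set_aio_ctx() above: the BdrvChild callbacks follow a two-phase protocol, so by the time .set_aio_ctx runs, blk_root_can_set_aio_ctx() has already agreed to the switch and a failure at that point would be a programming error. A rough sketch of that protocol, assuming the two callbacks live in the child's role as in the surrounding code (names here are illustrative, not taken from this commit):

    /* Two-phase switch as assumed above: ask the parent first, then
     * perform the switch, which must no longer fail. */
    static void set_ctx_sketch(BdrvChild *child, AioContext *ctx,
                               GSList **ignore, Error **errp)
    {
        if (!child->role->can_set_aio_ctx(child, ctx, ignore, errp)) {
            return; /* *errp is set, nothing was changed */
        }
        child->role->set_aio_ctx(child, ctx, ignore);
    }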
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -173,6 +173,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     unsigned i;
     unsigned nvqs = s->conf->num_queues;
+    Error *local_err = NULL;
     int r;
 
     if (vblk->dataplane_started || s->starting) {
@@ -212,7 +213,11 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     vblk->dataplane_started = true;
     trace_virtio_blk_data_plane_start(s);
 
-    blk_set_aio_context(s->conf->conf.blk, s->ctx);
+    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
+    if (r < 0) {
+        error_report_err(local_err);
+        goto fail_guest_notifiers;
+    }
 
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
@@ -281,8 +286,9 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     aio_context_acquire(s->ctx);
     aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
 
-    /* Drain and switch bs back to the QEMU main loop */
-    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
+    /* Drain and try to switch bs back to the QEMU main loop. If other users
+     * keep the BlockBackend in the iothread, that's ok */
+    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
     aio_context_release(s->ctx);
 
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -682,7 +682,8 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
     }
 
     aio_context_acquire(dataplane->ctx);
-    blk_set_aio_context(dataplane->blk, qemu_get_aio_context());
+    /* Xen doesn't have multiple users for nodes, so this can't fail */
+    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(dataplane->ctx);
 
     xendev = dataplane->xendev;
@@ -811,7 +812,8 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
     }
 
     aio_context_acquire(dataplane->ctx);
-    blk_set_aio_context(dataplane->blk, dataplane->ctx);
+    /* If other users keep the BlockBackend in the iothread, that's ok */
+    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
     aio_context_release(dataplane->ctx);
     return;
 
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -795,6 +795,7 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
     SCSIDevice *sd = SCSI_DEVICE(dev);
+    int ret;
 
     if (s->ctx && !s->dataplane_fenced) {
         AioContext *ctx;
@@ -808,9 +809,11 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
             return;
         }
         virtio_scsi_acquire(s);
-        blk_set_aio_context(sd->conf.blk, s->ctx);
+        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
         virtio_scsi_release(s);
+        if (ret < 0) {
+            return;
+        }
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
@@ -839,7 +842,8 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
 
     if (s->ctx) {
         virtio_scsi_acquire(s);
-        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context());
+        /* If other users keep the BlockBackend in the iothread, that's ok */
+        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
         virtio_scsi_release(s);
     }
 
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -208,7 +208,8 @@ void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
 void blk_op_block_all(BlockBackend *blk, Error *reason);
 void blk_op_unblock_all(BlockBackend *blk, Error *reason);
 AioContext *blk_get_aio_context(BlockBackend *blk);
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context);
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                        Error **errp);
 void blk_add_aio_context_notifier(BlockBackend *blk,
         void (*attached_aio_context)(AioContext *new_context, void *opaque),
         void (*detach_aio_context)(void *opaque), void *opaque);
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -678,7 +678,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
     s = bs->opaque;
     blk_insert_bs(blk, bs, &error_abort);
 
-    blk_set_aio_context(blk, ctx_a);
+    blk_set_aio_context(blk, ctx_a, &error_abort);
     aio_context_acquire(ctx_a);
 
     s->bh_indirection_ctx = ctx_b;
@@ -742,7 +742,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
     }
 
     aio_context_acquire(ctx_a);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx_a);
 
     bdrv_unref(bs);
@@ -903,7 +903,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     if (use_iothread) {
         iothread = iothread_new();
         ctx = iothread_get_aio_context(iothread);
-        blk_set_aio_context(blk_src, ctx);
+        blk_set_aio_context(blk_src, ctx, &error_abort);
     } else {
         ctx = qemu_get_aio_context();
     }
@@ -1001,7 +1001,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
 
     if (use_iothread) {
-        blk_set_aio_context(blk_src, qemu_get_aio_context());
+        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
     }
     aio_context_release(ctx);
 
--- a/tests/test-block-iothread.c
+++ b/tests/test-block-iothread.c
@@ -342,14 +342,14 @@ static void test_sync_op(const void *opaque)
     blk_insert_bs(blk, bs, &error_abort);
     c = QLIST_FIRST(&bs->parents);
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     aio_context_acquire(ctx);
     t->fn(c);
     if (t->blkfn) {
         t->blkfn(blk);
     }
     aio_context_release(ctx);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
 
     bdrv_unref(bs);
     blk_unref(blk);
@@ -428,7 +428,7 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
 
     tjob->n = 0;
     while (tjob->n == 0) {
@@ -436,7 +436,7 @@ static void test_attach_blockjob(void)
     }
 
     aio_context_acquire(ctx);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
     tjob->n = 0;
@@ -444,7 +444,7 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
 
     tjob->n = 0;
     while (tjob->n == 0) {
@@ -453,7 +453,7 @@ static void test_attach_blockjob(void)
 
     aio_context_acquire(ctx);
     job_complete_sync(&tjob->common.job, &error_abort);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
     bdrv_unref(bs);
@@ -497,7 +497,7 @@ static void test_propagate_basic(void)
     bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
 
     /* Switch the AioContext */
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
@@ -505,7 +505,7 @@ static void test_propagate_basic(void)
 
     /* Switch the AioContext back */
     ctx = qemu_get_aio_context();
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
@@ -565,7 +565,7 @@ static void test_propagate_diamond(void)
     blk_insert_bs(blk, bs_verify, &error_abort);
 
     /* Switch the AioContext */
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
@@ -574,7 +574,7 @@ static void test_propagate_diamond(void)
 
     /* Switch the AioContext back */
     ctx = qemu_get_aio_context();
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
@@ -654,7 +654,7 @@ static void test_propagate_mirror(void)
     job_cancel_sync_all();
 
     aio_context_acquire(ctx);
-    blk_set_aio_context(blk, main_ctx);
+    blk_set_aio_context(blk, main_ctx, &error_abort);
     bdrv_try_set_aio_context(target, main_ctx, &error_abort);
     aio_context_release(ctx);
 