mirror of https://github.com/xemu-project/xemu.git
Block layer patches
- virtio-blk: Multiqueue support (configurable iothread per queue)
- Made NBD export and hw/scsi thread-safe without AioContext lock
- Fix crash when loading snapshot on inactive node

Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmWEw/8RHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9bX0Q/9G+Qx8mQGmbxJzvtW7/1eaeJ5CPCYT8w3
# 033S5hCil43mvX2aQKTFrh1Nz4aYlqMDyURvNu7nigyADY+kBpzzJ1MFr6WQrzYv
# QEk4jf/FOllfKn8+/A0z2NJDhtpVgqKKHBsFZl8FBUcxd79daTaoPPM3BNNsOHQD
# o7Z7hR/iEdG9dkAh/fpwctsgMO/CoN0BRRyN2OByj03zeu1TlDJ6lX0hxlcJl9Jw
# vLo81rWTCqKRu+SbjBsb0HfYE2hP54A4hvxn4I9vYGYDz8ElucluYyeqUEK+mdrX
# /DQBdb+Osl1FD6MuIaFR+Rgp9Mu5h6ZOdvUyCY0zuByti851hV8qjW9BtrTfqaMh
# LMOKoL6c5B8XJYWVGAGrJexIw1hHq5WKdXN9zp4FZA4tOyHUMRjHuR1+zScU6gnU
# WRSIQR46w75A13clWyJs9Hf/q5Fp/1KT4nfuZ/hmiXvxdsYY5x1w/W3s9tRNjYKL
# d6FVk17cFc6Ksb7lWvDCgg61BNZtGm4Clmw0kJ6V1reiQz7AvDLmduLUQbmrVt7G
# gWAY4b2L9YXJpEx5en0kE50KLAUw/E9ozbOq6ZT9nFUKeNAPC8PS5lK7vYVwebCk
# VA0t8pFzKhdB1bJaG5fMSRPBuqkvhsaDEEDABlSro8dyyjoQBaEdk5P9Kxe66hBc
# xhTmDPdv/JM=
# =E3Zh
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 21 Dec 2023 18:02:23 EST
# gpg: using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg: issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (33 commits)
  virtio-blk: add iothread-vq-mapping parameter
  qdev: add IOThreadVirtQueueMappingList property type
  qdev-properties: alias all object class properties
  string-output-visitor: show structs as "<omitted>"
  block-coroutine-wrapper: use qemu_get_current_aio_context()
  block: remove outdated AioContext locking comments
  job: remove outdated AioContext locking comments
  scsi: remove outdated AioContext lock comment
  docs: remove AioContext lock from IOThread docs
  aio: remove aio_context_acquire()/aio_context_release() API
  aio-wait: draw equivalence between AIO_WAIT_WHILE() and AIO_WAIT_WHILE_UNLOCKED()
  scsi: remove AioContext locking
  block: remove bdrv_co_lock()
  block: remove AioContext locking
  graph-lock: remove AioContext locking
  aio: make aio_context_acquire()/aio_context_release() a no-op
  tests: remove aio_context_acquire() tests
  scsi: assert that callbacks run in the correct AioContext
  virtio-scsi: replace AioContext lock with tmf_bh_lock
  dma-helpers: don't lock AioContext in dma_blk_cb()
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
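Most of the diff below applies one mechanical pattern: call sites stop wrapping block-layer operations in aio_context_acquire()/aio_context_release(), since the series first turns those calls into no-ops and then removes the API entirely. A minimal before/after sketch of that pattern, condensed from the bdrv_open_backing_file() hunk further down (illustrative only, not a complete function):

    /* Before this series: the caller took the AioContext lock of backing_hd. */
    AioContext *backing_hd_ctx = bdrv_get_aio_context(backing_hd);
    aio_context_acquire(backing_hd_ctx);
    ret = bdrv_set_backing_hd(bs, backing_hd, errp);
    bdrv_unref(backing_hd);
    aio_context_release(backing_hd_ctx);

    /* After: the same call runs without any AioContext locking. */
    ret = bdrv_set_backing_hd(bs, backing_hd, errp);
    bdrv_unref(backing_hd);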
commit 6370d13c62

block.c | 363
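The other recurring change in the hunks below is that bdrv_graph_wrlock() and bdrv_graph_wrunlock() lose their BlockDriverState argument: the AioContext lock that the old variants temporarily released for that node no longer exists. A short before/after sketch, condensed from the bdrv_insert_node() hunk in this diff (illustrative only):

    /* Before: the writer lock was passed the node whose AioContext lock the
     * caller held. */
    bdrv_graph_wrlock(new_node_bs);
    ret = bdrv_replace_node(bs, new_node_bs, errp);
    bdrv_graph_wrunlock(new_node_bs);

    /* After: no argument needed, as in the hunks below. */
    bdrv_graph_wrlock();
    ret = bdrv_replace_node(bs, new_node_bs, errp);
    bdrv_graph_wrunlock();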
@@ -1616,16 +1616,10 @@ out:
     g_free(gen_node_name);
 }
 
-/*
- * The caller must always hold @bs AioContext lock, because this function calls
- * bdrv_refresh_total_sectors() which polls when called from non-coroutine
- * context.
- */
 static int no_coroutine_fn GRAPH_UNLOCKED
 bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
                  QDict *options, int open_flags, Error **errp)
 {
-    AioContext *ctx;
     Error *local_err = NULL;
     int i, ret;
     GLOBAL_STATE_CODE();
@@ -1673,21 +1667,15 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
     bs->supported_read_flags |= BDRV_REQ_REGISTERED_BUF;
     bs->supported_write_flags |= BDRV_REQ_REGISTERED_BUF;
 
-    /* Get the context after .bdrv_open, it can change the context */
-    ctx = bdrv_get_aio_context(bs);
-    aio_context_acquire(ctx);
-
     ret = bdrv_refresh_total_sectors(bs, bs->total_sectors);
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Could not refresh total sector count");
-        aio_context_release(ctx);
         return ret;
     }
 
     bdrv_graph_rdlock_main_loop();
     bdrv_refresh_limits(bs, NULL, &local_err);
     bdrv_graph_rdunlock_main_loop();
-    aio_context_release(ctx);
 
     if (local_err) {
         error_propagate(errp, local_err);
@@ -1708,12 +1696,12 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
 open_failed:
     bs->drv = NULL;
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     if (bs->file != NULL) {
         bdrv_unref_child(bs, bs->file);
         assert(!bs->file);
     }
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     g_free(bs->opaque);
     bs->opaque = NULL;
@ -2908,7 +2896,7 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm)
|
|||
* Replaces the node that a BdrvChild points to without updating permissions.
|
||||
*
|
||||
* If @new_bs is non-NULL, the parent of @child must already be drained through
|
||||
* @child and the caller must hold the AioContext lock for @new_bs.
|
||||
* @child.
|
||||
*/
|
||||
static void GRAPH_WRLOCK
|
||||
bdrv_replace_child_noperm(BdrvChild *child, BlockDriverState *new_bs)
|
||||
|
@ -3048,9 +3036,8 @@ static TransactionActionDrv bdrv_attach_child_common_drv = {
|
|||
*
|
||||
* Returns new created child.
|
||||
*
|
||||
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
|
||||
* @child_bs can move to a different AioContext in this function. Callers must
|
||||
* make sure that their AioContext locking is still correct after this.
|
||||
* Both @parent_bs and @child_bs can move to a different AioContext in this
|
||||
* function.
|
||||
*/
|
||||
static BdrvChild * GRAPH_WRLOCK
|
||||
bdrv_attach_child_common(BlockDriverState *child_bs,
|
||||
|
@ -3062,7 +3049,7 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
|
|||
Transaction *tran, Error **errp)
|
||||
{
|
||||
BdrvChild *new_child;
|
||||
AioContext *parent_ctx, *new_child_ctx;
|
||||
AioContext *parent_ctx;
|
||||
AioContext *child_ctx = bdrv_get_aio_context(child_bs);
|
||||
|
||||
assert(child_class->get_parent_desc);
|
||||
|
@ -3114,12 +3101,6 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
|
|||
}
|
||||
}
|
||||
|
||||
new_child_ctx = bdrv_get_aio_context(child_bs);
|
||||
if (new_child_ctx != child_ctx) {
|
||||
aio_context_release(child_ctx);
|
||||
aio_context_acquire(new_child_ctx);
|
||||
}
|
||||
|
||||
bdrv_ref(child_bs);
|
||||
/*
|
||||
* Let every new BdrvChild start with a drained parent. Inserting the child
|
||||
|
@ -3149,20 +3130,14 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
|
|||
};
|
||||
tran_add(tran, &bdrv_attach_child_common_drv, s);
|
||||
|
||||
if (new_child_ctx != child_ctx) {
|
||||
aio_context_release(new_child_ctx);
|
||||
aio_context_acquire(child_ctx);
|
||||
}
|
||||
|
||||
return new_child;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function doesn't update permissions, caller is responsible for this.
|
||||
*
|
||||
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
|
||||
* @child_bs can move to a different AioContext in this function. Callers must
|
||||
* make sure that their AioContext locking is still correct after this.
|
||||
* Both @parent_bs and @child_bs can move to a different AioContext in this
|
||||
* function.
|
||||
*
|
||||
* After calling this function, the transaction @tran may only be completed
|
||||
* while holding a writer lock for the graph.
|
||||
|
@ -3202,9 +3177,6 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs,
|
|||
*
|
||||
* On failure NULL is returned, errp is set and the reference to
|
||||
* child_bs is also dropped.
|
||||
*
|
||||
* The caller must hold the AioContext lock @child_bs, but not that of @ctx
|
||||
* (unless @child_bs is already in @ctx).
|
||||
*/
|
||||
BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
|
||||
const char *child_name,
|
||||
|
@ -3244,9 +3216,6 @@ out:
|
|||
*
|
||||
* On failure NULL is returned, errp is set and the reference to
|
||||
* child_bs is also dropped.
|
||||
*
|
||||
* If @parent_bs and @child_bs are in different AioContexts, the caller must
|
||||
* hold the AioContext lock for @child_bs, but not for @parent_bs.
|
||||
*/
|
||||
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
|
||||
BlockDriverState *child_bs,
|
||||
|
@ -3436,9 +3405,8 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs)
|
|||
*
|
||||
* Function doesn't update permissions, caller is responsible for this.
|
||||
*
|
||||
* The caller must hold the AioContext lock for @child_bs. Both @parent_bs and
|
||||
* @child_bs can move to a different AioContext in this function. Callers must
|
||||
* make sure that their AioContext locking is still correct after this.
|
||||
* Both @parent_bs and @child_bs can move to a different AioContext in this
|
||||
* function.
|
||||
*
|
||||
* After calling this function, the transaction @tran may only be completed
|
||||
* while holding a writer lock for the graph.
|
||||
|
@ -3531,9 +3499,8 @@ out:
|
|||
}
|
||||
|
||||
/*
|
||||
* The caller must hold the AioContext lock for @backing_hd. Both @bs and
|
||||
* @backing_hd can move to a different AioContext in this function. Callers must
|
||||
* make sure that their AioContext locking is still correct after this.
|
||||
* Both @bs and @backing_hd can move to a different AioContext in this
|
||||
* function.
|
||||
*
|
||||
* If a backing child is already present (i.e. we're detaching a node), that
|
||||
* child node must be drained.
|
||||
|
@ -3575,9 +3542,9 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
|
|||
|
||||
bdrv_ref(drain_bs);
|
||||
bdrv_drained_begin(drain_bs);
|
||||
bdrv_graph_wrlock(backing_hd);
|
||||
bdrv_graph_wrlock();
|
||||
ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp);
|
||||
bdrv_graph_wrunlock(backing_hd);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(drain_bs);
|
||||
bdrv_unref(drain_bs);
|
||||
|
||||
|
@ -3592,8 +3559,6 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
|
|||
* itself, all options starting with "${bdref_key}." are considered part of the
|
||||
* BlockdevRef.
|
||||
*
|
||||
* The caller must hold the main AioContext lock.
|
||||
*
|
||||
* TODO Can this be unified with bdrv_open_image()?
|
||||
*/
|
||||
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
|
||||
|
@ -3605,7 +3570,6 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
|
|||
int ret = 0;
|
||||
bool implicit_backing = false;
|
||||
BlockDriverState *backing_hd;
|
||||
AioContext *backing_hd_ctx;
|
||||
QDict *options;
|
||||
QDict *tmp_parent_options = NULL;
|
||||
Error *local_err = NULL;
|
||||
|
@ -3691,11 +3655,8 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
|
|||
|
||||
/* Hook up the backing file link; drop our reference, bs owns the
|
||||
* backing_hd reference now */
|
||||
backing_hd_ctx = bdrv_get_aio_context(backing_hd);
|
||||
aio_context_acquire(backing_hd_ctx);
|
||||
ret = bdrv_set_backing_hd(bs, backing_hd, errp);
|
||||
bdrv_unref(backing_hd);
|
||||
aio_context_release(backing_hd_ctx);
|
||||
|
||||
if (ret < 0) {
|
||||
goto free_exit;
|
||||
|
@ -3767,9 +3728,7 @@ done:
|
|||
*
|
||||
* The BlockdevRef will be removed from the options QDict.
|
||||
*
|
||||
* The caller must hold the lock of the main AioContext and no other AioContext.
|
||||
* @parent can move to a different AioContext in this function. Callers must
|
||||
* make sure that their AioContext locking is still correct after this.
|
||||
* @parent can move to a different AioContext in this function.
|
||||
*/
|
||||
BdrvChild *bdrv_open_child(const char *filename,
|
||||
QDict *options, const char *bdref_key,
|
||||
|
@ -3780,7 +3739,6 @@ BdrvChild *bdrv_open_child(const char *filename,
|
|||
{
|
||||
BlockDriverState *bs;
|
||||
BdrvChild *child;
|
||||
AioContext *ctx;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
|
@ -3790,13 +3748,10 @@ BdrvChild *bdrv_open_child(const char *filename,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
bdrv_graph_wrlock(NULL);
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_graph_wrlock();
|
||||
child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
|
||||
errp);
|
||||
aio_context_release(ctx);
|
||||
bdrv_graph_wrunlock(NULL);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
return child;
|
||||
}
|
||||
|
@ -3804,9 +3759,7 @@ BdrvChild *bdrv_open_child(const char *filename,
|
|||
/*
|
||||
* Wrapper on bdrv_open_child() for most popular case: open primary child of bs.
|
||||
*
|
||||
* The caller must hold the lock of the main AioContext and no other AioContext.
|
||||
* @parent can move to a different AioContext in this function. Callers must
|
||||
* make sure that their AioContext locking is still correct after this.
|
||||
* @parent can move to a different AioContext in this function.
|
||||
*/
|
||||
int bdrv_open_file_child(const char *filename,
|
||||
QDict *options, const char *bdref_key,
|
||||
|
@ -3881,7 +3834,6 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
|
|||
int64_t total_size;
|
||||
QemuOpts *opts = NULL;
|
||||
BlockDriverState *bs_snapshot = NULL;
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
@ -3890,9 +3842,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
|
|||
instead of opening 'filename' directly */
|
||||
|
||||
/* Get the required size from the image */
|
||||
aio_context_acquire(ctx);
|
||||
total_size = bdrv_getlength(bs);
|
||||
aio_context_release(ctx);
|
||||
|
||||
if (total_size < 0) {
|
||||
error_setg_errno(errp, -total_size, "Could not get image size");
|
||||
|
@ -3927,10 +3877,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs,
|
|||
goto out;
|
||||
}
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
ret = bdrv_append(bs_snapshot, bs, errp);
|
||||
aio_context_release(ctx);
|
||||
|
||||
if (ret < 0) {
|
||||
bs_snapshot = NULL;
|
||||
goto out;
|
||||
|
@ -3955,8 +3902,6 @@ out:
|
|||
* The reference parameter may be used to specify an existing block device which
|
||||
* should be opened. If specified, neither options nor a filename may be given,
|
||||
* nor can an existing BDS be reused (that is, *pbs has to be NULL).
|
||||
*
|
||||
* The caller must always hold the main AioContext lock.
|
||||
*/
|
||||
static BlockDriverState * no_coroutine_fn
|
||||
bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
|
||||
|
@ -3974,7 +3919,6 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
|
|||
Error *local_err = NULL;
|
||||
QDict *snapshot_options = NULL;
|
||||
int snapshot_flags = 0;
|
||||
AioContext *ctx = qemu_get_aio_context();
|
||||
|
||||
assert(!child_class || !flags);
|
||||
assert(!child_class == !parent);
|
||||
|
@ -4115,12 +4059,10 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
|
|||
/* Not requesting BLK_PERM_CONSISTENT_READ because we're only
|
||||
* looking at the header to guess the image format. This works even
|
||||
* in cases where a guest would not see a consistent state. */
|
||||
ctx = bdrv_get_aio_context(file_bs);
|
||||
aio_context_acquire(ctx);
|
||||
AioContext *ctx = bdrv_get_aio_context(file_bs);
|
||||
file = blk_new(ctx, 0, BLK_PERM_ALL);
|
||||
blk_insert_bs(file, file_bs, &local_err);
|
||||
bdrv_unref(file_bs);
|
||||
aio_context_release(ctx);
|
||||
|
||||
if (local_err) {
|
||||
goto fail;
|
||||
|
@ -4167,13 +4109,8 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
/* The AioContext could have changed during bdrv_open_common() */
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
|
||||
if (file) {
|
||||
aio_context_acquire(ctx);
|
||||
blk_unref(file);
|
||||
aio_context_release(ctx);
|
||||
file = NULL;
|
||||
}
|
||||
|
||||
|
@ -4231,16 +4168,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options,
|
|||
* (snapshot_bs); thus, we have to drop the strong reference to bs
|
||||
* (which we obtained by calling bdrv_new()). bs will not be deleted,
|
||||
* though, because the overlay still has a reference to it. */
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_unref(bs);
|
||||
aio_context_release(ctx);
|
||||
bs = snapshot_bs;
|
||||
}
|
||||
|
||||
return bs;
|
||||
|
||||
fail:
|
||||
aio_context_acquire(ctx);
|
||||
blk_unref(file);
|
||||
qobject_unref(snapshot_options);
|
||||
qobject_unref(bs->explicit_options);
|
||||
|
@ -4249,21 +4183,17 @@ fail:
|
|||
bs->options = NULL;
|
||||
bs->explicit_options = NULL;
|
||||
bdrv_unref(bs);
|
||||
aio_context_release(ctx);
|
||||
error_propagate(errp, local_err);
|
||||
return NULL;
|
||||
|
||||
close_and_fail:
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_unref(bs);
|
||||
aio_context_release(ctx);
|
||||
qobject_unref(snapshot_options);
|
||||
qobject_unref(options);
|
||||
error_propagate(errp, local_err);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* The caller must always hold the main AioContext lock. */
|
||||
BlockDriverState *bdrv_open(const char *filename, const char *reference,
|
||||
QDict *options, int flags, Error **errp)
|
||||
{
|
||||
|
@ -4540,12 +4470,7 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue)
|
|||
if (bs_queue) {
|
||||
BlockReopenQueueEntry *bs_entry, *next;
|
||||
QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
|
||||
AioContext *ctx = bdrv_get_aio_context(bs_entry->state.bs);
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_drained_end(bs_entry->state.bs);
|
||||
aio_context_release(ctx);
|
||||
|
||||
qobject_unref(bs_entry->state.explicit_options);
|
||||
qobject_unref(bs_entry->state.options);
|
||||
g_free(bs_entry);
|
||||
|
@ -4577,7 +4502,6 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
|
|||
{
|
||||
int ret = -1;
|
||||
BlockReopenQueueEntry *bs_entry, *next;
|
||||
AioContext *ctx;
|
||||
Transaction *tran = tran_new();
|
||||
g_autoptr(GSList) refresh_list = NULL;
|
||||
|
||||
|
@ -4586,10 +4510,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
|
|||
GLOBAL_STATE_CODE();
|
||||
|
||||
QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
|
||||
ctx = bdrv_get_aio_context(bs_entry->state.bs);
|
||||
aio_context_acquire(ctx);
|
||||
ret = bdrv_flush(bs_entry->state.bs);
|
||||
aio_context_release(ctx);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "Error flushing drive");
|
||||
goto abort;
|
||||
|
@ -4598,10 +4519,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
|
|||
|
||||
QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
|
||||
assert(bs_entry->state.bs->quiesce_counter > 0);
|
||||
ctx = bdrv_get_aio_context(bs_entry->state.bs);
|
||||
aio_context_acquire(ctx);
|
||||
ret = bdrv_reopen_prepare(&bs_entry->state, bs_queue, tran, errp);
|
||||
aio_context_release(ctx);
|
||||
if (ret < 0) {
|
||||
goto abort;
|
||||
}
|
||||
|
@ -4644,24 +4562,18 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
|
|||
* to first element.
|
||||
*/
|
||||
QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) {
|
||||
ctx = bdrv_get_aio_context(bs_entry->state.bs);
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_reopen_commit(&bs_entry->state);
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_graph_wrlock();
|
||||
tran_commit(tran);
|
||||
bdrv_graph_wrunlock(NULL);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) {
|
||||
BlockDriverState *bs = bs_entry->state.bs;
|
||||
|
||||
if (bs->drv->bdrv_reopen_commit_post) {
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
bs->drv->bdrv_reopen_commit_post(&bs_entry->state);
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4669,16 +4581,13 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
|
|||
goto cleanup;
|
||||
|
||||
abort:
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_graph_wrlock();
|
||||
tran_abort(tran);
|
||||
bdrv_graph_wrunlock(NULL);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
|
||||
if (bs_entry->prepared) {
|
||||
ctx = bdrv_get_aio_context(bs_entry->state.bs);
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_reopen_abort(&bs_entry->state);
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4691,24 +4600,13 @@ cleanup:
|
|||
int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
|
||||
Error **errp)
|
||||
{
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
BlockReopenQueue *queue;
|
||||
int ret;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts);
|
||||
|
||||
if (ctx != qemu_get_aio_context()) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
ret = bdrv_reopen_multiple(queue, errp);
|
||||
|
||||
if (ctx != qemu_get_aio_context()) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return bdrv_reopen_multiple(queue, errp);
|
||||
}
|
||||
|
||||
int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
|
||||
|
@ -4743,10 +4641,7 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
|
|||
*
|
||||
* Return 0 on success, otherwise return < 0 and set @errp.
|
||||
*
|
||||
* The caller must hold the AioContext lock of @reopen_state->bs.
|
||||
* @reopen_state->bs can move to a different AioContext in this function.
|
||||
* Callers must make sure that their AioContext locking is still correct after
|
||||
* this.
|
||||
*/
|
||||
static int GRAPH_UNLOCKED
|
||||
bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
|
||||
|
@ -4760,7 +4655,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
|
|||
const char *child_name = is_backing ? "backing" : "file";
|
||||
QObject *value;
|
||||
const char *str;
|
||||
AioContext *ctx, *old_ctx;
|
||||
bool has_child;
|
||||
int ret;
|
||||
|
||||
|
@ -4844,25 +4738,13 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
|
|||
bdrv_drained_begin(old_child_bs);
|
||||
}
|
||||
|
||||
old_ctx = bdrv_get_aio_context(bs);
|
||||
ctx = bdrv_get_aio_context(new_child_bs);
|
||||
if (old_ctx != ctx) {
|
||||
aio_context_release(old_ctx);
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
bdrv_graph_wrlock(new_child_bs);
|
||||
bdrv_graph_wrlock();
|
||||
|
||||
ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
|
||||
tran, errp);
|
||||
|
||||
bdrv_graph_wrunlock_ctx(ctx);
|
||||
|
||||
if (old_ctx != ctx) {
|
||||
aio_context_release(ctx);
|
||||
aio_context_acquire(old_ctx);
|
||||
}
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
if (old_child_bs) {
|
||||
bdrv_drained_end(old_child_bs);
|
||||
|
@ -4892,8 +4774,6 @@ out_rdlock:
|
|||
* It is the responsibility of the caller to then call the abort() or
|
||||
* commit() for any other BDS that have been left in a prepare() state
|
||||
*
|
||||
* The caller must hold the AioContext lock of @reopen_state->bs.
|
||||
*
|
||||
* After calling this function, the transaction @change_child_tran may only be
|
||||
* completed while holding a writer lock for the graph.
|
||||
*/
|
||||
|
@ -5209,14 +5089,14 @@ static void bdrv_close(BlockDriverState *bs)
|
|||
bs->drv = NULL;
|
||||
}
|
||||
|
||||
bdrv_graph_wrlock(bs);
|
||||
bdrv_graph_wrlock();
|
||||
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
|
||||
bdrv_unref_child(bs, child);
|
||||
}
|
||||
|
||||
assert(!bs->backing);
|
||||
assert(!bs->file);
|
||||
bdrv_graph_wrunlock(bs);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
g_free(bs->opaque);
|
||||
bs->opaque = NULL;
|
||||
|
@ -5509,9 +5389,9 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp)
|
|||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
bdrv_drained_begin(child_bs);
|
||||
bdrv_graph_wrlock(bs);
|
||||
bdrv_graph_wrlock();
|
||||
ret = bdrv_replace_node_common(bs, child_bs, true, true, errp);
|
||||
bdrv_graph_wrunlock(bs);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(child_bs);
|
||||
|
||||
return ret;
|
||||
|
@ -5528,8 +5408,6 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp)
|
|||
* child.
|
||||
*
|
||||
* This function does not create any image files.
|
||||
*
|
||||
* The caller must hold the AioContext lock for @bs_top.
|
||||
*/
|
||||
int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
|
||||
Error **errp)
|
||||
|
@ -5537,7 +5415,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
|
|||
int ret;
|
||||
BdrvChild *child;
|
||||
Transaction *tran = tran_new();
|
||||
AioContext *old_context, *new_context = NULL;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
|
@ -5545,23 +5422,10 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
|
|||
assert(!bs_new->backing);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
old_context = bdrv_get_aio_context(bs_top);
|
||||
bdrv_drained_begin(bs_top);
|
||||
|
||||
/*
|
||||
* bdrv_drained_begin() requires that only the AioContext of the drained
|
||||
* node is locked, and at this point it can still differ from the AioContext
|
||||
* of bs_top.
|
||||
*/
|
||||
new_context = bdrv_get_aio_context(bs_new);
|
||||
aio_context_release(old_context);
|
||||
aio_context_acquire(new_context);
|
||||
bdrv_drained_begin(bs_new);
|
||||
aio_context_release(new_context);
|
||||
aio_context_acquire(old_context);
|
||||
new_context = NULL;
|
||||
|
||||
bdrv_graph_wrlock(bs_top);
|
||||
bdrv_graph_wrlock();
|
||||
|
||||
child = bdrv_attach_child_noperm(bs_new, bs_top, "backing",
|
||||
&child_of_bds, bdrv_backing_role(bs_new),
|
||||
|
@ -5571,18 +5435,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
|
|||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* bdrv_attach_child_noperm could change the AioContext of bs_top and
|
||||
* bs_new, but at least they are in the same AioContext now. This is the
|
||||
* AioContext that we need to lock for the rest of the function.
|
||||
*/
|
||||
new_context = bdrv_get_aio_context(bs_top);
|
||||
|
||||
if (old_context != new_context) {
|
||||
aio_context_release(old_context);
|
||||
aio_context_acquire(new_context);
|
||||
}
|
||||
|
||||
ret = bdrv_replace_node_noperm(bs_top, bs_new, true, tran, errp);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
|
@ -5593,16 +5445,11 @@ out:
|
|||
tran_finalize(tran, ret);
|
||||
|
||||
bdrv_refresh_limits(bs_top, NULL, NULL);
|
||||
bdrv_graph_wrunlock(bs_top);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
bdrv_drained_end(bs_top);
|
||||
bdrv_drained_end(bs_new);
|
||||
|
||||
if (new_context && old_context != new_context) {
|
||||
aio_context_release(new_context);
|
||||
aio_context_acquire(old_context);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -5620,7 +5467,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
|
|||
bdrv_ref(old_bs);
|
||||
bdrv_drained_begin(old_bs);
|
||||
bdrv_drained_begin(new_bs);
|
||||
bdrv_graph_wrlock(new_bs);
|
||||
bdrv_graph_wrlock();
|
||||
|
||||
bdrv_replace_child_tran(child, new_bs, tran);
|
||||
|
||||
|
@ -5631,7 +5478,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
|
|||
|
||||
tran_finalize(tran, ret);
|
||||
|
||||
bdrv_graph_wrunlock(new_bs);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(old_bs);
|
||||
bdrv_drained_end(new_bs);
|
||||
bdrv_unref(old_bs);
|
||||
|
@ -5667,9 +5514,8 @@ static void bdrv_delete(BlockDriverState *bs)
|
|||
* after the call (even on failure), so if the caller intends to reuse the
|
||||
* dictionary, it needs to use qobject_ref() before calling bdrv_open.
|
||||
*
|
||||
* The caller holds the AioContext lock for @bs. It must make sure that @bs
|
||||
* stays in the same AioContext, i.e. @options must not refer to nodes in a
|
||||
* different AioContext.
|
||||
* The caller must make sure that @bs stays in the same AioContext, i.e.
|
||||
* @options must not refer to nodes in a different AioContext.
|
||||
*/
|
||||
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
|
||||
int flags, Error **errp)
|
||||
|
@ -5697,12 +5543,8 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
|
|||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
aio_context_release(ctx);
|
||||
aio_context_acquire(qemu_get_aio_context());
|
||||
new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags,
|
||||
errp);
|
||||
aio_context_release(qemu_get_aio_context());
|
||||
aio_context_acquire(ctx);
|
||||
assert(bdrv_get_aio_context(bs) == ctx);
|
||||
|
||||
options = NULL; /* bdrv_new_open_driver() eats options */
|
||||
|
@ -5718,9 +5560,9 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
|
|||
bdrv_ref(bs);
|
||||
bdrv_drained_begin(bs);
|
||||
bdrv_drained_begin(new_node_bs);
|
||||
bdrv_graph_wrlock(new_node_bs);
|
||||
bdrv_graph_wrlock();
|
||||
ret = bdrv_replace_node(bs, new_node_bs, errp);
|
||||
bdrv_graph_wrunlock(new_node_bs);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(new_node_bs);
|
||||
bdrv_drained_end(bs);
|
||||
bdrv_unref(bs);
|
||||
|
@ -5975,7 +5817,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
|||
|
||||
bdrv_ref(top);
|
||||
bdrv_drained_begin(base);
|
||||
bdrv_graph_wrlock(base);
|
||||
bdrv_graph_wrlock();
|
||||
|
||||
if (!top->drv || !base->drv) {
|
||||
goto exit_wrlock;
|
||||
|
@ -6015,7 +5857,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
|||
* That's a FIXME.
|
||||
*/
|
||||
bdrv_replace_node_common(top, base, false, false, &local_err);
|
||||
bdrv_graph_wrunlock(base);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
|
@ -6052,7 +5894,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
|
|||
goto exit;
|
||||
|
||||
exit_wrlock:
|
||||
bdrv_graph_wrunlock(base);
|
||||
bdrv_graph_wrunlock();
|
||||
exit:
|
||||
bdrv_drained_end(base);
|
||||
bdrv_unref(top);
|
||||
|
@ -7037,12 +6879,9 @@ void bdrv_activate_all(Error **errp)
|
|||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
|
||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
||||
int ret;
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
ret = bdrv_activate(bs, errp);
|
||||
aio_context_release(aio_context);
|
||||
if (ret < 0) {
|
||||
bdrv_next_cleanup(&it);
|
||||
return;
|
||||
|
@ -7137,20 +6976,10 @@ int bdrv_inactivate_all(void)
|
|||
BlockDriverState *bs = NULL;
|
||||
BdrvNextIterator it;
|
||||
int ret = 0;
|
||||
GSList *aio_ctxs = NULL, *ctx;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
|
||||
AioContext *aio_context = bdrv_get_aio_context(bs);
|
||||
|
||||
if (!g_slist_find(aio_ctxs, aio_context)) {
|
||||
aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
|
||||
aio_context_acquire(aio_context);
|
||||
}
|
||||
}
|
||||
|
||||
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
|
||||
/* Nodes with BDS parents are covered by recursion from the last
|
||||
* parent that gets inactivated. Don't inactivate them a second
|
||||
|
@ -7161,17 +6990,10 @@ int bdrv_inactivate_all(void)
|
|||
ret = bdrv_inactivate_recurse(bs);
|
||||
if (ret < 0) {
|
||||
bdrv_next_cleanup(&it);
|
||||
goto out;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
|
||||
AioContext *aio_context = ctx->data;
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
g_slist_free(aio_ctxs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -7257,11 +7079,8 @@ void bdrv_unref(BlockDriverState *bs)
|
|||
static void bdrv_schedule_unref_bh(void *opaque)
|
||||
{
|
||||
BlockDriverState *bs = opaque;
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
bdrv_unref(bs);
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -7398,8 +7217,6 @@ void bdrv_img_create(const char *filename, const char *fmt,
|
|||
return;
|
||||
}
|
||||
|
||||
aio_context_acquire(qemu_get_aio_context());
|
||||
|
||||
/* Create parameter list */
|
||||
create_opts = qemu_opts_append(create_opts, drv->create_opts);
|
||||
create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
|
||||
|
@ -7549,7 +7366,6 @@ out:
|
|||
qemu_opts_del(opts);
|
||||
qemu_opts_free(create_opts);
|
||||
error_propagate(errp, local_err);
|
||||
aio_context_release(qemu_get_aio_context());
|
||||
}
|
||||
|
||||
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
|
||||
|
@ -7583,33 +7399,6 @@ void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx)
|
|||
bdrv_dec_in_flight(bs);
|
||||
}
|
||||
|
||||
void coroutine_fn bdrv_co_lock(BlockDriverState *bs)
|
||||
{
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
|
||||
/* In the main thread, bs->aio_context won't change concurrently */
|
||||
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
|
||||
|
||||
/*
|
||||
* We're in coroutine context, so we already hold the lock of the main
|
||||
* loop AioContext. Don't lock it twice to avoid deadlocks.
|
||||
*/
|
||||
assert(qemu_in_coroutine());
|
||||
if (ctx != qemu_get_aio_context()) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
void coroutine_fn bdrv_co_unlock(BlockDriverState *bs)
|
||||
{
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
|
||||
assert(qemu_in_coroutine());
|
||||
if (ctx != qemu_get_aio_context()) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
static void bdrv_do_remove_aio_context_notifier(BdrvAioNotifier *ban)
|
||||
{
|
||||
GLOBAL_STATE_CODE();
|
||||
|
@ -7728,21 +7517,8 @@ static void bdrv_set_aio_context_commit(void *opaque)
|
|||
BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque;
|
||||
BlockDriverState *bs = (BlockDriverState *) state->bs;
|
||||
AioContext *new_context = state->new_ctx;
|
||||
AioContext *old_context = bdrv_get_aio_context(bs);
|
||||
|
||||
/*
|
||||
* Take the old AioContex when detaching it from bs.
|
||||
* At this point, new_context lock is already acquired, and we are now
|
||||
* also taking old_context. This is safe as long as bdrv_detach_aio_context
|
||||
* does not call AIO_POLL_WHILE().
|
||||
*/
|
||||
if (old_context != qemu_get_aio_context()) {
|
||||
aio_context_acquire(old_context);
|
||||
}
|
||||
bdrv_detach_aio_context(bs);
|
||||
if (old_context != qemu_get_aio_context()) {
|
||||
aio_context_release(old_context);
|
||||
}
|
||||
bdrv_attach_aio_context(bs, new_context);
|
||||
}
|
||||
|
||||
|
@ -7757,10 +7533,6 @@ static TransactionActionDrv set_aio_context = {
|
|||
*
|
||||
* Must be called from the main AioContext.
|
||||
*
|
||||
* The caller must own the AioContext lock for the old AioContext of bs, but it
|
||||
* must not own the AioContext lock for new_context (unless new_context is the
|
||||
* same as the current context of bs).
|
||||
*
|
||||
* @visited will accumulate all visited BdrvChild objects. The caller is
|
||||
* responsible for freeing the list afterwards.
|
||||
*/
|
||||
|
@ -7813,13 +7585,6 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx,
|
|||
*
|
||||
* If ignore_child is not NULL, that child (and its subgraph) will not
|
||||
* be touched.
|
||||
*
|
||||
* This function still requires the caller to take the bs current
|
||||
* AioContext lock, otherwise draining will fail since AIO_WAIT_WHILE
|
||||
* assumes the lock is always held if bs is in another AioContext.
|
||||
* For the same reason, it temporarily also holds the new AioContext, since
|
||||
* bdrv_drained_end calls BDRV_POLL_WHILE that assumes the lock is taken too.
|
||||
* Therefore the new AioContext lock must not be taken by the caller.
|
||||
*/
|
||||
int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
|
||||
BdrvChild *ignore_child, Error **errp)
|
||||
|
@ -7827,7 +7592,6 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
|
|||
Transaction *tran;
|
||||
GHashTable *visited;
|
||||
int ret;
|
||||
AioContext *old_context = bdrv_get_aio_context(bs);
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
/*
|
||||
|
@ -7846,8 +7610,8 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
|
|||
|
||||
/*
|
||||
* Linear phase: go through all callbacks collected in the transaction.
|
||||
* Run all callbacks collected in the recursion to switch all nodes
|
||||
* AioContext lock (transaction commit), or undo all changes done in the
|
||||
* Run all callbacks collected in the recursion to switch every node's
|
||||
* AioContext (transaction commit), or undo all changes done in the
|
||||
* recursion (transaction abort).
|
||||
*/
|
||||
|
||||
|
@ -7857,34 +7621,7 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
|
|||
return -EPERM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Release old AioContext, it won't be needed anymore, as all
|
||||
* bdrv_drained_begin() have been called already.
|
||||
*/
|
||||
if (qemu_get_aio_context() != old_context) {
|
||||
aio_context_release(old_context);
|
||||
}
|
||||
|
||||
/*
|
||||
* Acquire new AioContext since bdrv_drained_end() is going to be called
|
||||
* after we switched all nodes in the new AioContext, and the function
|
||||
* assumes that the lock of the bs is always taken.
|
||||
*/
|
||||
if (qemu_get_aio_context() != ctx) {
|
||||
aio_context_acquire(ctx);
|
||||
}
|
||||
|
||||
tran_commit(tran);
|
||||
|
||||
if (qemu_get_aio_context() != ctx) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
|
||||
/* Re-acquire the old AioContext, since the caller takes and releases it. */
|
||||
if (qemu_get_aio_context() != old_context) {
|
||||
aio_context_acquire(old_context);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -8006,7 +7743,6 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
|
|||
const char *node_name, Error **errp)
|
||||
{
|
||||
BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
|
||||
AioContext *aio_context;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
|
@ -8015,12 +7751,8 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
aio_context = bdrv_get_aio_context(to_replace_bs);
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
|
||||
to_replace_bs = NULL;
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* We don't want arbitrary node of the BDS chain to be replaced only the top
|
||||
|
@ -8033,12 +7765,9 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
|
|||
"because it cannot be guaranteed that doing so would not "
|
||||
"lead to an abrupt change of visible data",
|
||||
node_name, parent_bs->node_name);
|
||||
to_replace_bs = NULL;
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
aio_context_release(aio_context);
|
||||
return to_replace_bs;
|
||||
}
|
||||
|
||||
|
|
|
@ -496,10 +496,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
|
|||
block_copy_set_speed(bcs, speed);
|
||||
|
||||
/* Required permissions are taken by copy-before-write filter target */
|
||||
bdrv_graph_wrlock(target);
|
||||
bdrv_graph_wrlock();
|
||||
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
|
||||
&error_abort);
|
||||
bdrv_graph_wrunlock(target);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
return &job->common;
|
||||
|
||||
|
|
|
@ -251,9 +251,9 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
|
|||
ret = 0;
|
||||
fail_log:
|
||||
if (ret < 0) {
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_graph_wrlock();
|
||||
bdrv_unref_child(bs, s->log_file);
|
||||
bdrv_graph_wrunlock(NULL);
|
||||
bdrv_graph_wrunlock();
|
||||
s->log_file = NULL;
|
||||
}
|
||||
fail:
|
||||
|
@ -265,10 +265,10 @@ static void blk_log_writes_close(BlockDriverState *bs)
|
|||
{
|
||||
BDRVBlkLogWritesState *s = bs->opaque;
|
||||
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_graph_wrlock();
|
||||
bdrv_unref_child(bs, s->log_file);
|
||||
s->log_file = NULL;
|
||||
bdrv_graph_wrunlock(NULL);
|
||||
bdrv_graph_wrunlock();
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn GRAPH_RDLOCK
|
||||
|
|
|
@ -151,10 +151,10 @@ static void blkverify_close(BlockDriverState *bs)
|
|||
{
|
||||
BDRVBlkverifyState *s = bs->opaque;
|
||||
|
||||
bdrv_graph_wrlock(NULL);
|
||||
bdrv_graph_wrlock();
|
||||
bdrv_unref_child(bs, s->test_file);
|
||||
s->test_file = NULL;
|
||||
bdrv_graph_wrunlock(NULL);
|
||||
bdrv_graph_wrunlock();
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn GRAPH_RDLOCK
|
||||
|
|
|
@ -390,8 +390,6 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
|
|||
* Both sets of permissions can be changed later using blk_set_perm().
|
||||
*
|
||||
* Return the new BlockBackend on success, null on failure.
|
||||
*
|
||||
* Callers must hold the AioContext lock of @bs.
|
||||
*/
|
||||
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
|
||||
uint64_t shared_perm, Error **errp)
|
||||
|
@ -416,8 +414,6 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
|
|||
* Just as with bdrv_open(), after having called this function the reference to
|
||||
* @options belongs to the block layer (even on failure).
|
||||
*
|
||||
* Called without holding an AioContext lock.
|
||||
*
|
||||
* TODO: Remove @filename and @flags; it should be possible to specify a whole
|
||||
* BDS tree just by specifying the @options QDict (or @reference,
|
||||
* alternatively). At the time of adding this function, this is not possible,
|
||||
|
@ -429,7 +425,6 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
|
|||
{
|
||||
BlockBackend *blk;
|
||||
BlockDriverState *bs;
|
||||
AioContext *ctx;
|
||||
uint64_t perm = 0;
|
||||
uint64_t shared = BLK_PERM_ALL;
|
||||
|
||||
|
@ -459,23 +454,18 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
|
|||
shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
|
||||
}
|
||||
|
||||
aio_context_acquire(qemu_get_aio_context());
|
||||
bs = bdrv_open(filename, reference, options, flags, errp);
|
||||
aio_context_release(qemu_get_aio_context());
|
||||
if (!bs) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* bdrv_open() could have moved bs to a different AioContext */
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
blk = blk_new(bdrv_get_aio_context(bs), perm, shared);
|
||||
blk->perm = perm;
|
||||
blk->shared_perm = shared;
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
blk_insert_bs(blk, bs, errp);
|
||||
bdrv_unref(bs);
|
||||
aio_context_release(ctx);
|
||||
|
||||
if (!blk->root) {
|
||||
blk_unref(blk);
|
||||
|
@ -577,13 +567,9 @@ void blk_remove_all_bs(void)
|
|||
GLOBAL_STATE_CODE();
|
||||
|
||||
while ((blk = blk_all_next(blk)) != NULL) {
|
||||
AioContext *ctx = blk_get_aio_context(blk);
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
if (blk->root) {
|
||||
blk_remove_bs(blk);
|
||||
}
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -882,14 +868,11 @@ BlockBackend *blk_by_public(BlockBackendPublic *public)
|
|||
|
||||
/*
|
||||
* Disassociates the currently associated BlockDriverState from @blk.
|
||||
*
|
||||
* The caller must hold the AioContext lock for the BlockBackend.
|
||||
*/
|
||||
void blk_remove_bs(BlockBackend *blk)
|
||||
{
|
||||
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
|
||||
BdrvChild *root;
|
||||
AioContext *ctx;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
|
@ -919,30 +902,26 @@ void blk_remove_bs(BlockBackend *blk)
|
|||
root = blk->root;
|
||||
blk->root = NULL;
|
||||
|
||||
ctx = bdrv_get_aio_context(root->bs);
|
||||
bdrv_graph_wrlock(root->bs);
|
||||
bdrv_graph_wrlock();
|
||||
bdrv_root_unref_child(root);
|
||||
bdrv_graph_wrunlock_ctx(ctx);
|
||||
bdrv_graph_wrunlock();
|
||||
}
|
||||
|
||||
/*
|
||||
* Associates a new BlockDriverState with @blk.
|
||||
*
|
||||
* Callers must hold the AioContext lock of @bs.
|
||||
*/
|
||||
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
bdrv_ref(bs);
|
||||
bdrv_graph_wrlock(bs);
|
||||
bdrv_graph_wrlock();
|
||||
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
|
||||
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
|
||||
blk->perm, blk->shared_perm,
|
||||
blk, errp);
|
||||
bdrv_graph_wrunlock_ctx(ctx);
|
||||
bdrv_graph_wrunlock();
|
||||
if (blk->root == NULL) {
|
||||
return -EPERM;
|
||||
}
|
||||
|
@ -2739,20 +2718,16 @@ int blk_commit_all(void)
|
|||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
while ((blk = blk_all_next(blk)) != NULL) {
|
||||
AioContext *aio_context = blk_get_aio_context(blk);
|
||||
BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk));
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) {
|
||||
int ret;
|
||||
|
||||
ret = bdrv_commit(unfiltered_bs);
|
||||
if (ret < 0) {
|
||||
aio_context_release(aio_context);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -100,9 +100,9 @@ static void commit_abort(Job *job)
|
|||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
bdrv_drained_begin(commit_top_backing_bs);
|
||||
bdrv_graph_wrlock(commit_top_backing_bs);
|
||||
bdrv_graph_wrlock();
|
||||
bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort);
|
||||
bdrv_graph_wrunlock(commit_top_backing_bs);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(commit_top_backing_bs);
|
||||
|
||||
bdrv_unref(s->commit_top_bs);
|
||||
|
@ -339,7 +339,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
|
|||
* this is the responsibility of the interface (i.e. whoever calls
|
||||
* commit_start()).
|
||||
*/
|
||||
bdrv_graph_wrlock(top);
|
||||
bdrv_graph_wrlock();
|
||||
s->base_overlay = bdrv_find_overlay(top, base);
|
||||
assert(s->base_overlay);
|
||||
|
||||
|
@ -370,19 +370,19 @@ void commit_start(const char *job_id, BlockDriverState *bs,
|
|||
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
|
||||
iter_shared_perms, errp);
|
||||
if (ret < 0) {
|
||||
bdrv_graph_wrunlock(top);
|
||||
bdrv_graph_wrunlock();
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
|
||||
bdrv_graph_wrunlock(top);
|
||||
bdrv_graph_wrunlock();
|
||||
goto fail;
|
||||
}
|
||||
s->chain_frozen = true;
|
||||
|
||||
ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
|
||||
bdrv_graph_wrunlock(top);
|
||||
bdrv_graph_wrunlock();
|
||||
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
|
@ -434,9 +434,9 @@ fail:
|
|||
* otherwise this would fail because of lack of permissions. */
|
||||
if (commit_top_bs) {
|
||||
bdrv_drained_begin(top);
|
||||
bdrv_graph_wrlock(top);
|
||||
bdrv_graph_wrlock();
|
||||
bdrv_replace_node(commit_top_bs, top, &error_abort);
|
||||
bdrv_graph_wrunlock(top);
|
||||
bdrv_graph_wrunlock();
|
||||
bdrv_drained_end(top);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -412,7 +412,6 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
|||
int64_t cluster_size;
|
||||
g_autoptr(BlockdevOptions) full_opts = NULL;
|
||||
BlockdevOptionsCbw *opts;
|
||||
AioContext *ctx;
|
||||
int ret;
|
||||
|
||||
full_opts = cbw_parse_options(options, errp);
|
||||
|
@ -435,15 +434,11 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
|||
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
if (opts->bitmap) {
|
||||
bitmap = block_dirty_bitmap_lookup(opts->bitmap->node,
|
||||
opts->bitmap->name, NULL, errp);
|
||||
if (!bitmap) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error :
|
||||
|
@ -461,24 +456,21 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
|||
s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp);
|
||||
if (!s->bcs) {
|
||||
error_prepend(errp, "Cannot create block-copy-state: ");
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cluster_size = block_copy_cluster_size(s->bcs);
|
||||
|
||||
s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
|
||||
if (!s->done_bitmap) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
bdrv_disable_dirty_bitmap(s->done_bitmap);
|
||||
|
||||
/* s->access_bitmap starts equal to bcs bitmap */
|
||||
s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
|
||||
if (!s->access_bitmap) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
bdrv_disable_dirty_bitmap(s->access_bitmap);
|
||||
bdrv_dirty_bitmap_merge_internal(s->access_bitmap,
|
||||
|
@ -487,11 +479,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
|
|||
|
||||
qemu_co_mutex_init(&s->lock);
|
||||
QLIST_INIT(&s->frozen_read_reqs);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
aio_context_release(ctx);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cbw_close(BlockDriverState *bs)
|
||||
|
|
|
@ -114,7 +114,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
|||
}
|
||||
|
||||
ctx = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
if (export->iothread) {
|
||||
IOThread *iothread;
|
||||
|
@ -133,8 +132,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
|||
set_context_errp = fixed_iothread ? errp : NULL;
|
||||
ret = bdrv_try_change_aio_context(bs, new_ctx, NULL, set_context_errp);
|
||||
if (ret == 0) {
|
||||
aio_context_release(ctx);
|
||||
aio_context_acquire(new_ctx);
|
||||
ctx = new_ctx;
|
||||
} else if (fixed_iothread) {
|
||||
goto fail;
|
||||
|
@ -191,8 +188,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
|
|||
assert(exp->blk != NULL);
|
||||
|
||||
QLIST_INSERT_HEAD(&block_exports, exp, next);
|
||||
|
||||
aio_context_release(ctx);
|
||||
return exp;
|
||||
|
||||
fail:
|
||||
|
@ -200,7 +195,6 @@ fail:
|
|||
blk_set_dev_ops(blk, NULL, NULL);
|
||||
blk_unref(blk);
|
||||
}
|
||||
aio_context_release(ctx);
|
||||
if (exp) {
|
||||
g_free(exp->id);
|
||||
g_free(exp);
|
||||
|
@ -218,9 +212,6 @@ void blk_exp_ref(BlockExport *exp)
|
|||
static void blk_exp_delete_bh(void *opaque)
|
||||
{
|
||||
BlockExport *exp = opaque;
|
||||
AioContext *aio_context = exp->ctx;
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
assert(exp->refcount == 0);
|
||||
QLIST_REMOVE(exp, next);
|
||||
|
@ -230,8 +221,6 @@ static void blk_exp_delete_bh(void *opaque)
|
|||
qapi_event_send_block_export_deleted(exp->id);
|
||||
g_free(exp->id);
|
||||
g_free(exp);
|
||||
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
|
||||
void blk_exp_unref(BlockExport *exp)
|
||||
|
@ -249,22 +238,16 @@ void blk_exp_unref(BlockExport *exp)
|
|||
* connections and other internally held references start to shut down. When
|
||||
* the function returns, there may still be active references while the export
|
||||
* is in the process of shutting down.
|
||||
*
|
||||
* Acquires exp->ctx internally. Callers must *not* hold the lock.
|
||||
*/
|
||||
void blk_exp_request_shutdown(BlockExport *exp)
|
||||
{
|
||||
AioContext *aio_context = exp->ctx;
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
/*
|
||||
* If the user doesn't own the export any more, it is already shutting
|
||||
* down. We must not call .request_shutdown and decrease the refcount a
|
||||
* second time.
|
||||
*/
|
||||
if (!exp->user_owned) {
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
exp->drv->request_shutdown(exp);
|
||||
|
@ -272,9 +255,6 @@ void blk_exp_request_shutdown(BlockExport *exp)
|
|||
assert(exp->user_owned);
|
||||
exp->user_owned = false;
|
||||
blk_exp_unref(exp);
|
||||
|
||||
out:
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -278,7 +278,6 @@ static void vu_blk_exp_resize(void *opaque)
|
|||
vu_config_change_msg(&vexp->vu_server.vu_dev);
|
||||
}
|
||||
|
||||
/* Called with vexp->export.ctx acquired */
|
||||
static void vu_blk_drained_begin(void *opaque)
|
||||
{
|
||||
VuBlkExport *vexp = opaque;
|
||||
|
@ -287,7 +286,6 @@ static void vu_blk_drained_begin(void *opaque)
|
|||
vhost_user_server_detach_aio_context(&vexp->vu_server);
|
||||
}
|
||||
|
||||
/* Called with vexp->export.blk AioContext acquired */
|
||||
static void vu_blk_drained_end(void *opaque)
|
||||
{
|
||||
VuBlkExport *vexp = opaque;
|
||||
|
@ -300,8 +298,6 @@ static void vu_blk_drained_end(void *opaque)
|
|||
* Ensures that bdrv_drained_begin() waits until in-flight requests complete
|
||||
* and the server->co_trip coroutine has terminated. It will be restarted in
|
||||
* vhost_user_server_attach_aio_context().
|
||||
*
|
||||
* Called with vexp->export.ctx acquired.
|
||||
*/
|
||||
static bool vu_blk_drained_poll(void *opaque)
|
||||
{
|
||||
|
|
|

@@ -712,17 +712,11 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
 
 #ifdef CONFIG_LINUX_AIO
     /* Currently Linux does AIO only for files opened with O_DIRECT */
-    if (s->use_linux_aio) {
-        if (!(s->open_flags & O_DIRECT)) {
-            error_setg(errp, "aio=native was specified, but it requires "
-                             "cache.direct=on, which was not specified.");
-            ret = -EINVAL;
-            goto fail;
-        }
-        if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) {
-            error_prepend(errp, "Unable to use native AIO: ");
-            goto fail;
-        }
+    if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) {
+        error_setg(errp, "aio=native was specified, but it requires "
+                         "cache.direct=on, which was not specified.");
+        ret = -EINVAL;
+        goto fail;
     }
 #else
     if (s->use_linux_aio) {
@@ -733,14 +727,7 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
     }
 #endif /* !defined(CONFIG_LINUX_AIO) */
 
-#ifdef CONFIG_LINUX_IO_URING
-    if (s->use_linux_io_uring) {
-        if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) {
-            error_prepend(errp, "Unable to use io_uring: ");
-            goto fail;
-        }
-    }
-#else
+#ifndef CONFIG_LINUX_IO_URING
     if (s->use_linux_io_uring) {
         error_setg(errp, "aio=io_uring was specified, but is not supported "
                          "in this build.");
@@ -2444,6 +2431,48 @@ static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
     return true;
 }
 
+#ifdef CONFIG_LINUX_IO_URING
+static inline bool raw_check_linux_io_uring(BDRVRawState *s)
+{
+    Error *local_err = NULL;
+    AioContext *ctx;
+
+    if (!s->use_linux_io_uring) {
+        return false;
+    }
+
+    ctx = qemu_get_current_aio_context();
+    if (unlikely(!aio_setup_linux_io_uring(ctx, &local_err))) {
+        error_reportf_err(local_err, "Unable to use linux io_uring, "
+                                     "falling back to thread pool: ");
+        s->use_linux_io_uring = false;
+        return false;
+    }
+    return true;
+}
+#endif
+
+#ifdef CONFIG_LINUX_AIO
+static inline bool raw_check_linux_aio(BDRVRawState *s)
+{
+    Error *local_err = NULL;
+    AioContext *ctx;
+
+    if (!s->use_linux_aio) {
+        return false;
+    }
+
+    ctx = qemu_get_current_aio_context();
+    if (unlikely(!aio_setup_linux_aio(ctx, &local_err))) {
+        error_reportf_err(local_err, "Unable to use Linux AIO, "
+                                     "falling back to thread pool: ");
+        s->use_linux_aio = false;
+        return false;
+    }
+    return true;
+}
+#endif
+
 static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
                                    uint64_t bytes, QEMUIOVector *qiov, int type)
 {
@@ -2474,13 +2503,13 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
     if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) {
         type |= QEMU_AIO_MISALIGNED;
 #ifdef CONFIG_LINUX_IO_URING
-    } else if (s->use_linux_io_uring) {
+    } else if (raw_check_linux_io_uring(s)) {
         assert(qiov->size == bytes);
         ret = luring_co_submit(bs, s->fd, offset, qiov, type);
         goto out;
 #endif
 #ifdef CONFIG_LINUX_AIO
-    } else if (s->use_linux_aio) {
+    } else if (raw_check_linux_aio(s)) {
         assert(qiov->size == bytes);
         ret = laio_co_submit(s->fd, offset, qiov, type,
                              s->aio_max_batch);
@@ -2567,39 +2596,13 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
     };
 
 #ifdef CONFIG_LINUX_IO_URING
-    if (s->use_linux_io_uring) {
+    if (raw_check_linux_io_uring(s)) {
         return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
     }
 #endif
     return raw_thread_pool_submit(handle_aiocb_flush, &acb);
 }
 
-static void raw_aio_attach_aio_context(BlockDriverState *bs,
-                                       AioContext *new_context)
-{
-    BDRVRawState __attribute__((unused)) *s = bs->opaque;
-#ifdef CONFIG_LINUX_AIO
-    if (s->use_linux_aio) {
-        Error *local_err = NULL;
-        if (!aio_setup_linux_aio(new_context, &local_err)) {
-            error_reportf_err(local_err, "Unable to use native AIO, "
-                                         "falling back to thread pool: ");
-            s->use_linux_aio = false;
-        }
-    }
-#endif
-#ifdef CONFIG_LINUX_IO_URING
-    if (s->use_linux_io_uring) {
-        Error *local_err = NULL;
-        if (!aio_setup_linux_io_uring(new_context, &local_err)) {
-            error_reportf_err(local_err, "Unable to use linux io_uring, "
-                                         "falling back to thread pool: ");
-            s->use_linux_io_uring = false;
-        }
-    }
-#endif
-}
-
 static void raw_close(BlockDriverState *bs)
 {
     BDRVRawState *s = bs->opaque;
@@ -3896,7 +3899,6 @@ BlockDriver bdrv_file = {
     .bdrv_co_copy_range_from = raw_co_copy_range_from,
     .bdrv_co_copy_range_to  = raw_co_copy_range_to,
     .bdrv_refresh_limits = raw_refresh_limits,
-    .bdrv_attach_aio_context = raw_aio_attach_aio_context,
 
     .bdrv_co_truncate = raw_co_truncate,
     .bdrv_co_getlength = raw_co_getlength,
@@ -4266,7 +4268,6 @@ static BlockDriver bdrv_host_device = {
     .bdrv_co_copy_range_from = raw_co_copy_range_from,
     .bdrv_co_copy_range_to  = raw_co_copy_range_to,
     .bdrv_refresh_limits = raw_refresh_limits,
-    .bdrv_attach_aio_context = raw_aio_attach_aio_context,
 
     .bdrv_co_truncate = raw_co_truncate,
     .bdrv_co_getlength = raw_co_getlength,
@@ -4402,7 +4403,6 @@ static BlockDriver bdrv_host_cdrom = {
     .bdrv_co_pwritev        = raw_co_pwritev,
     .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
     .bdrv_refresh_limits    = cdrom_refresh_limits,
-    .bdrv_attach_aio_context = raw_aio_attach_aio_context,
 
     .bdrv_co_truncate       = raw_co_truncate,
     .bdrv_co_getlength      = raw_co_getlength,
@@ -4528,7 +4528,6 @@ static BlockDriver bdrv_host_cdrom = {
     .bdrv_co_pwritev        = raw_co_pwritev,
     .bdrv_co_flush_to_disk  = raw_co_flush_to_disk,
     .bdrv_refresh_limits    = cdrom_refresh_limits,
-    .bdrv_attach_aio_context = raw_aio_attach_aio_context,
 
     .bdrv_co_truncate       = raw_co_truncate,
     .bdrv_co_getlength      = raw_co_getlength,
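
The raw_check_linux_aio()/raw_check_linux_io_uring() helpers added above share one shape: the native backend is set up lazily in whichever thread issues the first request, and a setup failure quietly disables the fast path so later requests use the thread pool. The standalone sketch below only illustrates that shape; every name in it (setup_backend, check_backend, submit_request) is invented for the illustration and is not QEMU code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for aio_setup_linux_aio() succeeding or failing
     * in the calling thread; not QEMU code. */
    static _Thread_local bool backend_ready;
    static _Thread_local bool backend_failed;

    static bool setup_backend(void)
    {
        return false;   /* pretend the kernel interface is unavailable */
    }

    /* Mirrors the shape of raw_check_linux_aio(): set up lazily on first use,
     * and on failure remember it so later requests skip straight to the
     * fallback path. */
    static bool check_backend(void)
    {
        if (backend_failed) {
            return false;
        }
        if (!backend_ready) {
            if (!setup_backend()) {
                fprintf(stderr, "backend unavailable, falling back\n");
                backend_failed = true;
                return false;
            }
            backend_ready = true;
        }
        return true;
    }

    static void submit_request(int req)
    {
        if (check_backend()) {
            printf("request %d: native AIO path\n", req);
        } else {
            printf("request %d: thread-pool fallback\n", req);
        }
    }

    int main(void)
    {
        submit_request(1);  /* first request probes the backend and falls back */
        submit_request(2);  /* failure is remembered, no second probe */
        return 0;
    }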

@@ -106,27 +106,12 @@ static uint32_t reader_count(void)
     return rd;
 }
 
-void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
+void no_coroutine_fn bdrv_graph_wrlock(void)
 {
-    AioContext *ctx = NULL;
-
     GLOBAL_STATE_CODE();
     assert(!qatomic_read(&has_writer));
     assert(!qemu_in_coroutine());
 
-    /*
-     * Release only non-mainloop AioContext. The mainloop often relies on the
-     * BQL and doesn't lock the main AioContext before doing things.
-     */
-    if (bs) {
-        ctx = bdrv_get_aio_context(bs);
-        if (ctx != qemu_get_aio_context()) {
-            aio_context_release(ctx);
-        } else {
-            ctx = NULL;
-        }
-    }
-
     /* Make sure that constantly arriving new I/O doesn't cause starvation */
     bdrv_drain_all_begin_nopoll();
 
@@ -155,27 +140,13 @@ void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
     } while (reader_count() >= 1);
 
     bdrv_drain_all_end();
-
-    if (ctx) {
-        aio_context_acquire(bdrv_get_aio_context(bs));
-    }
 }
 
-void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx)
+void no_coroutine_fn bdrv_graph_wrunlock(void)
 {
     GLOBAL_STATE_CODE();
     assert(qatomic_read(&has_writer));
 
-    /*
-     * Release only non-mainloop AioContext. The mainloop often relies on the
-     * BQL and doesn't lock the main AioContext before doing things.
-     */
-    if (ctx && ctx != qemu_get_aio_context()) {
-        aio_context_release(ctx);
-    } else {
-        ctx = NULL;
-    }
-
     WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) {
         /*
          * No need for memory barriers, this works in pair with
@@ -197,17 +168,6 @@ void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx)
          * progress.
          */
         aio_bh_poll(qemu_get_aio_context());
-
-        if (ctx) {
-            aio_context_acquire(ctx);
-        }
     }
 }
 
-void no_coroutine_fn bdrv_graph_wrunlock(BlockDriverState *bs)
-{
-    AioContext *ctx = bs ? bdrv_get_aio_context(bs) : NULL;
-
-    bdrv_graph_wrunlock_ctx(ctx);
-}
-
 void coroutine_fn bdrv_graph_co_rdlock(void)
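
With the BlockDriverState argument dropped from bdrv_graph_wrlock()/bdrv_graph_wrunlock(), callers no longer juggle per-node AioContext locks around graph changes. A minimal sketch of a caller under the new signatures follows; the function name and variables are placeholders, not code from this series, and the usual QEMU block-layer headers are assumed.

    /*
     * Sketch only: detach a child node under the reworked graph lock.
     * example_detach_child, "bs" and "child" are placeholder names.
     */
    static void example_detach_child(BlockDriverState *bs, BdrvChild *child)
    {
        bdrv_graph_wrlock();            /* no AioContext release/reacquire around it */
        bdrv_unref_child(bs, child);    /* modify the graph under the write lock */
        bdrv_graph_wrunlock();          /* readers may run again afterwards */
    }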

[block/io.c: 45 lines changed]

@@ -294,8 +294,6 @@ static void bdrv_co_drain_bh_cb(void *opaque)
     BlockDriverState *bs = data->bs;
 
     if (bs) {
-        AioContext *ctx = bdrv_get_aio_context(bs);
-        aio_context_acquire(ctx);
         bdrv_dec_in_flight(bs);
         if (data->begin) {
             bdrv_do_drained_begin(bs, data->parent, data->poll);
@@ -303,7 +301,6 @@ static void bdrv_co_drain_bh_cb(void *opaque)
             assert(!data->poll);
             bdrv_do_drained_end(bs, data->parent);
         }
-        aio_context_release(ctx);
     } else {
         assert(data->begin);
         bdrv_drain_all_begin();
@@ -320,8 +317,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
 {
     BdrvCoDrainData data;
     Coroutine *self = qemu_coroutine_self();
-    AioContext *ctx = bdrv_get_aio_context(bs);
-    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);
 
     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
      * other coroutines run if they were queued by aio_co_enter(). */
@@ -340,17 +335,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
         bdrv_inc_in_flight(bs);
     }
 
-    /*
-     * Temporarily drop the lock across yield or we would get deadlocks.
-     * bdrv_co_drain_bh_cb() reaquires the lock as needed.
-     *
-     * When we yield below, the lock for the current context will be
-     * released, so if this is actually the lock that protects bs, don't drop
-     * it a second time.
-     */
-    if (ctx != co_ctx) {
-        aio_context_release(ctx);
-    }
     replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                      bdrv_co_drain_bh_cb, &data);
 
@@ -358,11 +342,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
     /* If we are resumed from some other event (such as an aio completion or a
      * timer callback), it is a bug in the caller that should be fixed. */
     assert(data.done);
-
-    /* Reacquire the AioContext of bs if we dropped it */
-    if (ctx != co_ctx) {
-        aio_context_acquire(ctx);
-    }
 }
 
 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
@@ -478,13 +457,12 @@ static bool bdrv_drain_all_poll(void)
     GLOBAL_STATE_CODE();
     GRAPH_RDLOCK_GUARD_MAINLOOP();
 
-    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
-     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
+    /*
+     * bdrv_drain_poll() can't make changes to the graph and we hold the BQL,
+     * so iterating bdrv_next_all_states() is safe.
+     */
     while ((bs = bdrv_next_all_states(bs))) {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-        aio_context_acquire(aio_context);
         result |= bdrv_drain_poll(bs, NULL, true);
-        aio_context_release(aio_context);
     }
 
     return result;
@@ -525,11 +503,7 @@ void bdrv_drain_all_begin_nopoll(void)
     /* Quiesce all nodes, without polling in-flight requests yet. The graph
      * cannot change during this loop. */
     while ((bs = bdrv_next_all_states(bs))) {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-
-        aio_context_acquire(aio_context);
         bdrv_do_drained_begin(bs, NULL, false);
-        aio_context_release(aio_context);
     }
 }
 
@@ -588,11 +562,7 @@ void bdrv_drain_all_end(void)
     }
 
     while ((bs = bdrv_next_all_states(bs))) {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-
-        aio_context_acquire(aio_context);
         bdrv_do_drained_end(bs, NULL);
-        aio_context_release(aio_context);
     }
 
     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
@@ -2368,15 +2338,10 @@ int bdrv_flush_all(void)
     }
 
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-        int ret;
-
-        aio_context_acquire(aio_context);
-        ret = bdrv_flush(bs);
+        int ret = bdrv_flush(bs);
         if (ret < 0 && !result) {
             result = ret;
         }
-        aio_context_release(aio_context);
     }
 
     return result;

@@ -662,7 +662,6 @@ static int mirror_exit_common(Job *job)
     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
     BlockJob *bjob = &s->common;
     MirrorBDSOpaque *bs_opaque;
-    AioContext *replace_aio_context = NULL;
     BlockDriverState *src;
     BlockDriverState *target_bs;
     BlockDriverState *mirror_top_bs;
@@ -677,7 +676,6 @@ static int mirror_exit_common(Job *job)
     }
     s->prepared = true;
 
-    aio_context_acquire(qemu_get_aio_context());
     bdrv_graph_rdlock_main_loop();
 
     mirror_top_bs = s->mirror_top_bs;
@@ -742,11 +740,6 @@ static int mirror_exit_common(Job *job)
     }
     bdrv_graph_rdunlock_main_loop();
 
-    if (s->to_replace) {
-        replace_aio_context = bdrv_get_aio_context(s->to_replace);
-        aio_context_acquire(replace_aio_context);
-    }
-
     if (s->should_complete && !abort) {
         BlockDriverState *to_replace = s->to_replace ?: src;
         bool ro = bdrv_is_read_only(to_replace);
@@ -764,7 +757,7 @@ static int mirror_exit_common(Job *job)
          * check for an op blocker on @to_replace, and we have our own
          * there.
          */
-        bdrv_graph_wrlock(target_bs);
+        bdrv_graph_wrlock();
         if (bdrv_recurse_can_replace(src, to_replace)) {
             bdrv_replace_node(to_replace, target_bs, &local_err);
         } else {
@@ -773,7 +766,7 @@ static int mirror_exit_common(Job *job)
                          "would not lead to an abrupt change of visible data",
                          to_replace->node_name, target_bs->node_name);
         }
-        bdrv_graph_wrunlock(target_bs);
+        bdrv_graph_wrunlock();
         bdrv_drained_end(to_replace);
         if (local_err) {
             error_report_err(local_err);
@@ -785,9 +778,6 @@ static int mirror_exit_common(Job *job)
         error_free(s->replace_blocker);
         bdrv_unref(s->to_replace);
     }
-    if (replace_aio_context) {
-        aio_context_release(replace_aio_context);
-    }
     g_free(s->replaces);
 
     /*
@@ -796,9 +786,9 @@ static int mirror_exit_common(Job *job)
      * valid.
      */
     block_job_remove_all_bdrv(bjob);
-    bdrv_graph_wrlock(mirror_top_bs);
+    bdrv_graph_wrlock();
     bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
-    bdrv_graph_wrunlock(mirror_top_bs);
+    bdrv_graph_wrunlock();
 
     bdrv_drained_end(target_bs);
     bdrv_unref(target_bs);
@@ -811,8 +801,6 @@ static int mirror_exit_common(Job *job)
     bdrv_unref(mirror_top_bs);
     bdrv_unref(src);
 
-    aio_context_release(qemu_get_aio_context());
-
     return ret;
 }
 
@@ -1191,24 +1179,17 @@ static void mirror_complete(Job *job, Error **errp)
 
     /* block all operations on to_replace bs */
     if (s->replaces) {
-        AioContext *replace_aio_context;
-
         s->to_replace = bdrv_find_node(s->replaces);
         if (!s->to_replace) {
             error_setg(errp, "Node name '%s' not found", s->replaces);
             return;
         }
 
-        replace_aio_context = bdrv_get_aio_context(s->to_replace);
-        aio_context_acquire(replace_aio_context);
-
         /* TODO Translate this into child freeze system. */
         error_setg(&s->replace_blocker,
                    "block device is in use by block-job-complete");
         bdrv_op_block_all(s->to_replace, s->replace_blocker);
         bdrv_ref(s->to_replace);
-
-        aio_context_release(replace_aio_context);
     }
 
     s->should_complete = true;
@@ -1914,13 +1895,13 @@ static BlockJob *mirror_start_job(
      */
     bdrv_disable_dirty_bitmap(s->dirty_bitmap);
 
-    bdrv_graph_wrlock(bs);
+    bdrv_graph_wrlock();
     ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                              BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                              BLK_PERM_CONSISTENT_READ,
                              errp);
     if (ret < 0) {
-        bdrv_graph_wrunlock(bs);
+        bdrv_graph_wrunlock();
         goto fail;
     }
 
@@ -1965,17 +1946,17 @@ static BlockJob *mirror_start_job(
         ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                  iter_shared_perms, errp);
         if (ret < 0) {
-            bdrv_graph_wrunlock(bs);
+            bdrv_graph_wrunlock();
             goto fail;
         }
     }
 
     if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
-        bdrv_graph_wrunlock(bs);
+        bdrv_graph_wrunlock();
         goto fail;
     }
     }
-    bdrv_graph_wrunlock(bs);
+    bdrv_graph_wrunlock();
 
     QTAILQ_INIT(&s->ops_in_flight);
 
@@ -2001,12 +1982,12 @@ fail:
 
     bs_opaque->stop = true;
     bdrv_drained_begin(bs);
-    bdrv_graph_wrlock(bs);
+    bdrv_graph_wrlock();
     assert(mirror_top_bs->backing->bs == bs);
     bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                              &error_abort);
     bdrv_replace_node(mirror_top_bs, bs, &error_abort);
-    bdrv_graph_wrunlock(bs);
+    bdrv_graph_wrunlock();
     bdrv_drained_end(bs);
 
     bdrv_unref(mirror_top_bs);

@@ -95,7 +95,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
 {
     BlockDriverState *bs;
     BdrvDirtyBitmap *bitmap;
-    AioContext *aio_context;
 
     if (!name || name[0] == '\0') {
         error_setg(errp, "Bitmap name cannot be empty");
@@ -107,14 +106,11 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
         return;
     }
 
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-
     if (has_granularity) {
         if (granularity < 512 || !is_power_of_2(granularity)) {
             error_setg(errp, "Granularity must be power of 2 "
                              "and at least 512");
-            goto out;
+            return;
         }
     } else {
         /* Default to cluster size, if available: */
@@ -132,12 +128,12 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
     if (persistent &&
         !bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp))
     {
-        goto out;
+        return;
     }
 
     bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp);
     if (bitmap == NULL) {
-        goto out;
+        return;
     }
 
     if (disabled) {
@@ -145,9 +141,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
     }
 
     bdrv_dirty_bitmap_set_persistence(bitmap, persistent);
-
-out:
-    aio_context_release(aio_context);
 }
 
 BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
@@ -157,7 +150,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
 {
     BlockDriverState *bs;
     BdrvDirtyBitmap *bitmap;
-    AioContext *aio_context;
 
     GLOBAL_STATE_CODE();
 
@@ -166,19 +158,14 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
         return NULL;
     }
 
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-
     if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO,
                                 errp)) {
-        aio_context_release(aio_context);
         return NULL;
     }
 
     if (bdrv_dirty_bitmap_get_persistence(bitmap) &&
         bdrv_remove_persistent_dirty_bitmap(bs, name, errp) < 0)
     {
-        aio_context_release(aio_context);
         return NULL;
     }
 
@@ -190,7 +177,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
         *bitmap_bs = bs;
     }
 
-    aio_context_release(aio_context);
     return release ? NULL : bitmap;
 }
 

@@ -141,7 +141,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
     const char *id = qdict_get_str(qdict, "id");
     BlockBackend *blk;
     BlockDriverState *bs;
-    AioContext *aio_context;
     Error *local_err = NULL;
 
     GLOBAL_STATE_CODE();
@@ -168,14 +167,10 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
         return;
     }
 
-    aio_context = blk_get_aio_context(blk);
-    aio_context_acquire(aio_context);
-
     bs = blk_bs(blk);
     if (bs) {
         if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) {
             error_report_err(local_err);
-            aio_context_release(aio_context);
             return;
         }
 
@@ -196,8 +191,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
     } else {
         blk_unref(blk);
     }
-
-    aio_context_release(aio_context);
 }
 
 void hmp_commit(Monitor *mon, const QDict *qdict)
@@ -213,7 +206,6 @@ void hmp_commit(Monitor *mon, const QDict *qdict)
         ret = blk_commit_all();
     } else {
         BlockDriverState *bs;
-        AioContext *aio_context;
 
         blk = blk_by_name(device);
         if (!blk) {
@@ -222,18 +214,13 @@ void hmp_commit(Monitor *mon, const QDict *qdict)
         }
 
         bs = bdrv_skip_implicit_filters(blk_bs(blk));
-        aio_context = bdrv_get_aio_context(bs);
-        aio_context_acquire(aio_context);
-
         if (!blk_is_available(blk)) {
             error_report("Device '%s' has no medium", device);
-            aio_context_release(aio_context);
             return;
         }
 
         ret = bdrv_commit(bs);
-
-        aio_context_release(aio_context);
     }
     if (ret < 0) {
         error_report("'commit' error for '%s': %s", device, strerror(-ret));
@@ -560,7 +547,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
     BlockBackend *blk = NULL;
     BlockDriverState *bs = NULL;
     BlockBackend *local_blk = NULL;
-    AioContext *ctx = NULL;
     bool qdev = qdict_get_try_bool(qdict, "qdev", false);
     const char *device = qdict_get_str(qdict, "device");
     const char *command = qdict_get_str(qdict, "command");
@@ -582,9 +568,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
         }
     }
 
-    ctx = blk ? blk_get_aio_context(blk) : bdrv_get_aio_context(bs);
-    aio_context_acquire(ctx);
-
     if (bs) {
         blk = local_blk = blk_new(bdrv_get_aio_context(bs), 0, BLK_PERM_ALL);
         ret = blk_insert_bs(blk, bs, &err);
@@ -622,11 +605,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
 
 fail:
     blk_unref(local_blk);
-
-    if (ctx) {
-        aio_context_release(ctx);
-    }
-
     hmp_handle_error(mon, err);
 }
 
@@ -882,7 +860,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
     int nb_sns, i;
     int total;
     int *global_snapshots;
-    AioContext *aio_context;
 
     typedef struct SnapshotEntry {
         QEMUSnapshotInfo sn;
@@ -909,11 +886,8 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
         error_report_err(err);
         return;
     }
-    aio_context = bdrv_get_aio_context(bs);
 
-    aio_context_acquire(aio_context);
     nb_sns = bdrv_snapshot_list(bs, &sn_tab);
-    aio_context_release(aio_context);
 
     if (nb_sns < 0) {
         monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns);
@@ -924,9 +898,7 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
         int bs1_nb_sns = 0;
         ImageEntry *ie;
         SnapshotEntry *se;
-        AioContext *ctx = bdrv_get_aio_context(bs1);
 
-        aio_context_acquire(ctx);
         if (bdrv_can_snapshot(bs1)) {
             sn = NULL;
             bs1_nb_sns = bdrv_snapshot_list(bs1, &sn);
@@ -944,7 +916,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
             }
             g_free(sn);
         }
-        aio_context_release(ctx);
     }
 
     if (no_snapshot) {

@@ -174,7 +174,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp)
 {
     BlockBackend *blk;
     BlockDriverState *bs;
-    AioContext *aio_context;
     bool has_attached_device;
 
     GLOBAL_STATE_CODE();
@@ -204,13 +203,10 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp)
         return;
     }
 
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-
     bdrv_graph_rdlock_main_loop();
     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
         bdrv_graph_rdunlock_main_loop();
-        goto out;
+        return;
     }
     bdrv_graph_rdunlock_main_loop();
 
@@ -223,9 +219,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp)
          * value passed here (i.e. false). */
         blk_dev_change_media_cb(blk, false, &error_abort);
     }
-
-out:
-    aio_context_release(aio_context);
 }
 
 void qmp_blockdev_remove_medium(const char *id, Error **errp)
@@ -237,7 +230,6 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
                                             BlockDriverState *bs, Error **errp)
 {
     Error *local_err = NULL;
-    AioContext *ctx;
     bool has_device;
     int ret;
 
@@ -259,11 +251,7 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk,
         return;
     }
 
-    ctx = bdrv_get_aio_context(bs);
-    aio_context_acquire(ctx);
     ret = blk_insert_bs(blk, bs, errp);
-    aio_context_release(ctx);
-
     if (ret < 0) {
         return;
     }
@@ -374,9 +362,7 @@ void qmp_blockdev_change_medium(const char *device,
         qdict_put_str(options, "driver", format);
     }
 
-    aio_context_acquire(qemu_get_aio_context());
     medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp);
-    aio_context_release(qemu_get_aio_context());
 
     if (!medium_bs) {
         goto fail;
@@ -437,20 +423,16 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
     ThrottleConfig cfg;
     BlockDriverState *bs;
     BlockBackend *blk;
-    AioContext *aio_context;
 
     blk = qmp_get_blk(arg->device, arg->id, errp);
     if (!blk) {
         return;
     }
 
-    aio_context = blk_get_aio_context(blk);
-    aio_context_acquire(aio_context);
-
     bs = blk_bs(blk);
     if (!bs) {
         error_setg(errp, "Device has no medium");
-        goto out;
+        return;
     }
 
     throttle_config_init(&cfg);
@@ -505,7 +487,7 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
     }
 
     if (!throttle_is_valid(&cfg, errp)) {
-        goto out;
+        return;
     }
 
     if (throttle_enabled(&cfg)) {
@@ -522,9 +504,6 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp)
         /* If all throttling settings are set to 0, disable I/O limits */
         blk_io_limits_disable(blk);
     }
-
-out:
-    aio_context_release(aio_context);
 }
 
 void qmp_block_latency_histogram_set(

[block/qapi.c: 18 lines changed]

@@ -234,13 +234,11 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
     int ret;
     Error *err = NULL;
 
-    aio_context_acquire(bdrv_get_aio_context(bs));
-
     size = bdrv_getlength(bs);
     if (size < 0) {
         error_setg_errno(errp, -size, "Can't get image size '%s'",
                          bs->exact_filename);
-        goto out;
+        return;
     }
 
     bdrv_refresh_filename(bs);
@@ -265,7 +263,7 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
     info->format_specific = bdrv_get_specific_info(bs, &err);
     if (err) {
         error_propagate(errp, err);
-        goto out;
+        return;
     }
     backing_filename = bs->backing_file;
     if (backing_filename[0] != '\0') {
@@ -300,11 +298,8 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
         break;
     default:
         error_propagate(errp, err);
-        goto out;
+        return;
     }
-
-out:
-    aio_context_release(bdrv_get_aio_context(bs));
 }
 
 /**
@@ -709,15 +704,10 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
     /* Just to be safe if query_nodes is not always initialized */
     if (has_query_nodes && query_nodes) {
         for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
-            AioContext *ctx = bdrv_get_aio_context(bs);
-
-            aio_context_acquire(ctx);
             QAPI_LIST_APPEND(tail, bdrv_query_bds_stats(bs, false));
-            aio_context_release(ctx);
         }
     } else {
         for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
-            AioContext *ctx = blk_get_aio_context(blk);
             BlockStats *s;
             char *qdev;
 
@@ -725,7 +715,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                 continue;
             }
 
-            aio_context_acquire(ctx);
             s = bdrv_query_bds_stats(blk_bs(blk), true);
             s->device = g_strdup(blk_name(blk));
 
@@ -737,7 +726,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
             }
 
             bdrv_query_blk_stats(s->stats, blk);
-            aio_context_release(ctx);
 
             QAPI_LIST_APPEND(tail, s);
         }

@@ -2807,9 +2807,9 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file)
     if (close_data_file && has_data_file(bs)) {
         GLOBAL_STATE_CODE();
         bdrv_graph_rdunlock_main_loop();
-        bdrv_graph_wrlock(NULL);
+        bdrv_graph_wrlock();
         bdrv_unref_child(bs, s->data_file);
-        bdrv_graph_wrunlock(NULL);
+        bdrv_graph_wrunlock();
         s->data_file = NULL;
         bdrv_graph_rdlock_main_loop();
     }

@@ -1037,14 +1037,14 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
 
 close_exit:
     /* cleanup on error */
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     for (i = 0; i < s->num_children; i++) {
         if (!opened[i]) {
             continue;
         }
         bdrv_unref_child(bs, s->children[i]);
     }
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
     g_free(s->children);
     g_free(opened);
 exit:
@@ -1057,11 +1057,11 @@ static void quorum_close(BlockDriverState *bs)
     BDRVQuorumState *s = bs->opaque;
     int i;
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     for (i = 0; i < s->num_children; i++) {
         bdrv_unref_child(bs, s->children[i]);
     }
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     g_free(s->children);
 }

@@ -470,7 +470,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
 {
     BDRVRawState *s = bs->opaque;
-    AioContext *ctx;
     bool has_size;
     uint64_t offset, size;
     BdrvChildRole file_role;
@@ -522,11 +521,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
                 bs->file->bs->filename);
     }
 
-    ctx = bdrv_get_aio_context(bs);
-    aio_context_acquire(ctx);
     ret = raw_apply_options(bs, s, offset, has_size, size, errp);
-    aio_context_release(ctx);
-
     if (ret < 0) {
         return ret;
     }

@@ -394,14 +394,7 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
     }
 
     if (reopen_queue) {
-        AioContext *ctx = bdrv_get_aio_context(bs);
-        if (ctx != qemu_get_aio_context()) {
-            aio_context_release(ctx);
-        }
         bdrv_reopen_multiple(reopen_queue, errp);
-        if (ctx != qemu_get_aio_context()) {
-            aio_context_acquire(ctx);
-        }
     }
 }
 
@@ -462,14 +455,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     BlockDriverState *top_bs;
     BdrvChild *active_disk, *hidden_disk, *secondary_disk;
     int64_t active_length, hidden_length, disk_length;
-    AioContext *aio_context;
     Error *local_err = NULL;
     BackupPerf perf = { .use_copy_range = true, .max_workers = 1 };
 
     GLOBAL_STATE_CODE();
 
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
     s = bs->opaque;
 
     if (s->stage == BLOCK_REPLICATION_DONE ||
@@ -479,20 +469,17 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
          * Ignore the request because the secondary side of replication
          * doesn't have to do anything anymore.
          */
-        aio_context_release(aio_context);
         return;
     }
 
     if (s->stage != BLOCK_REPLICATION_NONE) {
         error_setg(errp, "Block replication is running or done");
-        aio_context_release(aio_context);
         return;
     }
 
     if (s->mode != mode) {
         error_setg(errp, "The parameter mode's value is invalid, needs %d,"
                    " but got %d", s->mode, mode);
-        aio_context_release(aio_context);
         return;
     }
 
@@ -505,7 +492,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     if (!active_disk || !active_disk->bs || !active_disk->bs->backing) {
         error_setg(errp, "Active disk doesn't have backing file");
         bdrv_graph_rdunlock_main_loop();
-        aio_context_release(aio_context);
         return;
     }
 
@@ -513,7 +499,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     if (!hidden_disk->bs || !hidden_disk->bs->backing) {
         error_setg(errp, "Hidden disk doesn't have backing file");
         bdrv_graph_rdunlock_main_loop();
-        aio_context_release(aio_context);
         return;
     }
 
@@ -521,7 +506,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
         error_setg(errp, "The secondary disk doesn't have block backend");
         bdrv_graph_rdunlock_main_loop();
-        aio_context_release(aio_context);
         return;
     }
     bdrv_graph_rdunlock_main_loop();
@@ -534,7 +518,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
         active_length != hidden_length || hidden_length != disk_length) {
         error_setg(errp, "Active disk, hidden disk, secondary disk's length"
                    " are not the same");
-        aio_context_release(aio_context);
         return;
     }
 
@@ -546,7 +529,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
         !hidden_disk->bs->drv->bdrv_make_empty) {
         error_setg(errp,
                    "Active disk or hidden disk doesn't support make_empty");
-        aio_context_release(aio_context);
         bdrv_graph_rdunlock_main_loop();
         return;
     }
@@ -556,11 +538,10 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     reopen_backing_file(bs, true, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
-        aio_context_release(aio_context);
         return;
     }
 
-    bdrv_graph_wrlock(bs);
+    bdrv_graph_wrlock();
 
     bdrv_ref(hidden_disk->bs);
     s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk",
@@ -568,8 +549,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
                                        &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
-        bdrv_graph_wrunlock(bs);
-        aio_context_release(aio_context);
+        bdrv_graph_wrunlock();
         return;
     }
 
@@ -579,8 +559,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
                                           BDRV_CHILD_DATA, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
-        bdrv_graph_wrunlock(bs);
-        aio_context_release(aio_context);
+        bdrv_graph_wrunlock();
         return;
     }
 
@@ -592,15 +571,14 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     if (!top_bs || !bdrv_is_root_node(top_bs) ||
         !check_top_bs(top_bs, bs)) {
         error_setg(errp, "No top_bs or it is invalid");
-        bdrv_graph_wrunlock(bs);
+        bdrv_graph_wrunlock();
         reopen_backing_file(bs, false, NULL);
-        aio_context_release(aio_context);
         return;
     }
     bdrv_op_block_all(top_bs, s->blocker);
     bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
 
-    bdrv_graph_wrunlock(bs);
+    bdrv_graph_wrunlock();
 
     s->backup_job = backup_job_create(
                                 NULL, s->secondary_disk->bs, s->hidden_disk->bs,
@@ -612,13 +590,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     if (local_err) {
         error_propagate(errp, local_err);
         backup_job_cleanup(bs);
-        aio_context_release(aio_context);
         return;
     }
     job_start(&s->backup_job->job);
     break;
     default:
-        aio_context_release(aio_context);
         abort();
     }
 
@@ -629,18 +605,12 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     }
 
     s->error = 0;
-    aio_context_release(aio_context);
 }
 
 static void replication_do_checkpoint(ReplicationState *rs, Error **errp)
 {
     BlockDriverState *bs = rs->opaque;
-    BDRVReplicationState *s;
-    AioContext *aio_context;
-
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-    s = bs->opaque;
+    BDRVReplicationState *s = bs->opaque;
 
     if (s->stage == BLOCK_REPLICATION_DONE ||
         s->stage == BLOCK_REPLICATION_FAILOVER) {
@@ -649,38 +619,28 @@ static void replication_do_checkpoint(ReplicationState *rs, Error **errp)
          * Ignore the request because the secondary side of replication
          * doesn't have to do anything anymore.
          */
-        aio_context_release(aio_context);
         return;
     }
 
     if (s->mode == REPLICATION_MODE_SECONDARY) {
         secondary_do_checkpoint(bs, errp);
     }
-    aio_context_release(aio_context);
 }
 
 static void replication_get_error(ReplicationState *rs, Error **errp)
 {
     BlockDriverState *bs = rs->opaque;
-    BDRVReplicationState *s;
-    AioContext *aio_context;
-
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-    s = bs->opaque;
+    BDRVReplicationState *s = bs->opaque;
 
     if (s->stage == BLOCK_REPLICATION_NONE) {
         error_setg(errp, "Block replication is not running");
-        aio_context_release(aio_context);
         return;
     }
 
     if (s->error) {
         error_setg(errp, "I/O error occurred");
-        aio_context_release(aio_context);
         return;
     }
-    aio_context_release(aio_context);
 }
 
 static void replication_done(void *opaque, int ret)
@@ -691,12 +651,12 @@ static void replication_done(void *opaque, int ret)
     if (ret == 0) {
         s->stage = BLOCK_REPLICATION_DONE;
 
-        bdrv_graph_wrlock(NULL);
+        bdrv_graph_wrlock();
         bdrv_unref_child(bs, s->secondary_disk);
         s->secondary_disk = NULL;
         bdrv_unref_child(bs, s->hidden_disk);
         s->hidden_disk = NULL;
-        bdrv_graph_wrunlock(NULL);
+        bdrv_graph_wrunlock();
 
         s->error = 0;
     } else {
@@ -708,12 +668,7 @@ static void replication_done(void *opaque, int ret)
 static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
 {
     BlockDriverState *bs = rs->opaque;
-    BDRVReplicationState *s;
-    AioContext *aio_context;
-
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-    s = bs->opaque;
+    BDRVReplicationState *s = bs->opaque;
 
     if (s->stage == BLOCK_REPLICATION_DONE ||
         s->stage == BLOCK_REPLICATION_FAILOVER) {
@@ -722,13 +677,11 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
          * Ignore the request because the secondary side of replication
          * doesn't have to do anything anymore.
          */
-        aio_context_release(aio_context);
         return;
     }
 
     if (s->stage != BLOCK_REPLICATION_RUNNING) {
         error_setg(errp, "Block replication is not running");
-        aio_context_release(aio_context);
         return;
     }
 
@@ -744,15 +697,12 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
          * disk, secondary disk in backup_job_completed().
          */
         if (s->backup_job) {
-            aio_context_release(aio_context);
             job_cancel_sync(&s->backup_job->job, true);
-            aio_context_acquire(aio_context);
         }
 
         if (!failover) {
             secondary_do_checkpoint(bs, errp);
             s->stage = BLOCK_REPLICATION_DONE;
-            aio_context_release(aio_context);
             return;
         }
 
@@ -765,10 +715,8 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
         bdrv_graph_rdunlock_main_loop();
         break;
     default:
-        aio_context_release(aio_context);
         abort();
     }
-    aio_context_release(aio_context);
 }
 
 static const char *const replication_strong_runtime_opts[] = {

@@ -196,8 +196,10 @@ bdrv_snapshot_fallback(BlockDriverState *bs)
 int bdrv_can_snapshot(BlockDriverState *bs)
 {
     BlockDriver *drv = bs->drv;
+
     GLOBAL_STATE_CODE();
-    if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
+
+    if (!drv || !bdrv_is_inserted(bs) || !bdrv_is_writable(bs)) {
         return 0;
     }
 
@@ -290,9 +292,9 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
     }
 
     /* .bdrv_open() will re-attach it */
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     bdrv_unref_child(bs, fallback);
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
     open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err);
@@ -525,9 +527,7 @@ static bool GRAPH_RDLOCK bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
     return bdrv_has_blk(bs) || QLIST_EMPTY(&bs->parents);
 }
 
-/* Group operations. All block drivers are involved.
- * These functions will properly handle dataplane (take aio_context_acquire
- * when appropriate for appropriate block drivers) */
+/* Group operations. All block drivers are involved. */
 
 bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
                            Error **errp)
@@ -545,14 +545,11 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
     iterbdrvs = bdrvs;
     while (iterbdrvs) {
         BlockDriverState *bs = iterbdrvs->data;
-        AioContext *ctx = bdrv_get_aio_context(bs);
         bool ok = true;
 
-        aio_context_acquire(ctx);
         if (devices || bdrv_all_snapshots_includes_bs(bs)) {
             ok = bdrv_can_snapshot(bs);
         }
-        aio_context_release(ctx);
         if (!ok) {
             error_setg(errp, "Device '%s' is writable but does not support "
                        "snapshots", bdrv_get_device_or_node_name(bs));
@@ -582,18 +579,15 @@ int bdrv_all_delete_snapshot(const char *name,
     iterbdrvs = bdrvs;
     while (iterbdrvs) {
         BlockDriverState *bs = iterbdrvs->data;
-        AioContext *ctx = bdrv_get_aio_context(bs);
         QEMUSnapshotInfo sn1, *snapshot = &sn1;
         int ret = 0;
 
-        aio_context_acquire(ctx);
         if ((devices || bdrv_all_snapshots_includes_bs(bs)) &&
             bdrv_snapshot_find(bs, snapshot, name) >= 0)
         {
             ret = bdrv_snapshot_delete(bs, snapshot->id_str,
                                        snapshot->name, errp);
         }
-        aio_context_release(ctx);
         if (ret < 0) {
             error_prepend(errp, "Could not delete snapshot '%s' on '%s': ",
                           name, bdrv_get_device_or_node_name(bs));
@@ -628,17 +622,14 @@ int bdrv_all_goto_snapshot(const char *name,
     iterbdrvs = bdrvs;
     while (iterbdrvs) {
         BlockDriverState *bs = iterbdrvs->data;
-        AioContext *ctx = bdrv_get_aio_context(bs);
         bool all_snapshots_includes_bs;
 
-        aio_context_acquire(ctx);
         bdrv_graph_rdlock_main_loop();
         all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs);
         bdrv_graph_rdunlock_main_loop();
 
         ret = (devices || all_snapshots_includes_bs) ?
               bdrv_snapshot_goto(bs, name, errp) : 0;
-        aio_context_release(ctx);
         if (ret < 0) {
             bdrv_graph_rdlock_main_loop();
             error_prepend(errp, "Could not load snapshot '%s' on '%s': ",
@@ -670,15 +661,12 @@ int bdrv_all_has_snapshot(const char *name,
     iterbdrvs = bdrvs;
     while (iterbdrvs) {
         BlockDriverState *bs = iterbdrvs->data;
-        AioContext *ctx = bdrv_get_aio_context(bs);
         QEMUSnapshotInfo sn;
         int ret = 0;
 
-        aio_context_acquire(ctx);
         if (devices || bdrv_all_snapshots_includes_bs(bs)) {
             ret = bdrv_snapshot_find(bs, &sn, name);
         }
-        aio_context_release(ctx);
         if (ret < 0) {
             if (ret == -ENOENT) {
                 return 0;
@@ -715,10 +703,8 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
     iterbdrvs = bdrvs;
     while (iterbdrvs) {
         BlockDriverState *bs = iterbdrvs->data;
-        AioContext *ctx = bdrv_get_aio_context(bs);
         int ret = 0;
 
-        aio_context_acquire(ctx);
         if (bs == vm_state_bs) {
             sn->vm_state_size = vm_state_size;
             ret = bdrv_snapshot_create(bs, sn);
@@ -726,7 +712,6 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
             sn->vm_state_size = 0;
             ret = bdrv_snapshot_create(bs, sn);
         }
-        aio_context_release(ctx);
         if (ret < 0) {
             error_setg(errp, "Could not create snapshot '%s' on '%s'",
                        sn->name, bdrv_get_device_or_node_name(bs));
@@ -757,13 +742,10 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
     iterbdrvs = bdrvs;
     while (iterbdrvs) {
         BlockDriverState *bs = iterbdrvs->data;
-        AioContext *ctx = bdrv_get_aio_context(bs);
         bool found = false;
 
-        aio_context_acquire(ctx);
         found = (devices || bdrv_all_snapshots_includes_bs(bs)) &&
             bdrv_can_snapshot(bs);
-        aio_context_release(ctx);
 
         if (vmstate_bs) {
             if (g_str_equal(vmstate_bs,

@@ -99,9 +99,9 @@ static int stream_prepare(Job *job)
         }
     }
 
-    bdrv_graph_wrlock(s->target_bs);
+    bdrv_graph_wrlock();
     bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err);
-    bdrv_graph_wrunlock(s->target_bs);
+    bdrv_graph_wrunlock();
 
     /*
      * This call will do I/O, so the graph can change again from here on.
@@ -366,10 +366,10 @@ void stream_start(const char *job_id, BlockDriverState *bs,
      * already have our own plans. Also don't allow resize as the image size is
      * queried only at the job start and then cached.
      */
-    bdrv_graph_wrlock(bs);
+    bdrv_graph_wrlock();
     if (block_job_add_bdrv(&s->common, "active node", bs, 0,
                            basic_flags | BLK_PERM_WRITE, errp)) {
-        bdrv_graph_wrunlock(bs);
+        bdrv_graph_wrunlock();
         goto fail;
     }
 
@@ -389,11 +389,11 @@ void stream_start(const char *job_id, BlockDriverState *bs,
         ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                  basic_flags, errp);
         if (ret < 0) {
-            bdrv_graph_wrunlock(bs);
+            bdrv_graph_wrunlock();
             goto fail;
         }
     }
-    bdrv_graph_wrunlock(bs);
+    bdrv_graph_wrunlock();
 
     s->base_overlay = base_overlay;
     s->above_base = above_base;

[block/vmdk.c: 20 lines changed]

@@ -272,7 +272,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
     BDRVVmdkState *s = bs->opaque;
     VmdkExtent *e;
 
-    bdrv_graph_wrlock(NULL);
+    bdrv_graph_wrlock();
     for (i = 0; i < s->num_extents; i++) {
         e = &s->extents[i];
         g_free(e->l1_table);
@@ -283,7 +283,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
             bdrv_unref_child(bs, e->file);
         }
     }
-    bdrv_graph_wrunlock(NULL);
+    bdrv_graph_wrunlock();
 
     g_free(s->extents);
 }
@@ -1247,9 +1247,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
                                 0, 0, 0, 0, 0, &extent, errp);
         if (ret < 0) {
             bdrv_graph_rdunlock_main_loop();
-            bdrv_graph_wrlock(NULL);
+            bdrv_graph_wrlock();
             bdrv_unref_child(bs, extent_file);
-            bdrv_graph_wrunlock(NULL);
+            bdrv_graph_wrunlock();
             bdrv_graph_rdlock_main_loop();
             goto out;
         }
@@ -1266,9 +1266,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
             g_free(buf);
             if (ret) {
                 bdrv_graph_rdunlock_main_loop();
-                bdrv_graph_wrlock(NULL);
+                bdrv_graph_wrlock();
                 bdrv_unref_child(bs, extent_file);
-                bdrv_graph_wrunlock(NULL);
+                bdrv_graph_wrunlock();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1277,9 +1277,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
             ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp);
             if (ret) {
                 bdrv_graph_rdunlock_main_loop();
-                bdrv_graph_wrlock(NULL);
+                bdrv_graph_wrlock();
                 bdrv_unref_child(bs, extent_file);
-                bdrv_graph_wrunlock(NULL);
+                bdrv_graph_wrunlock();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1287,9 +1287,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
         } else {
             error_setg(errp, "Unsupported extent type '%s'", type);
             bdrv_graph_rdunlock_main_loop();
-            bdrv_graph_wrlock(NULL);
+            bdrv_graph_wrlock();
             bdrv_unref_child(bs, extent_file);
-            bdrv_graph_wrunlock(NULL);
+            bdrv_graph_wrunlock();
             bdrv_graph_rdlock_main_loop();
             ret = -ENOTSUP;
             goto out;

@@ -33,7 +33,6 @@ void qmp_block_set_write_threshold(const char *node_name,
                                    Error **errp)
 {
     BlockDriverState *bs;
-    AioContext *aio_context;
 
     bs = bdrv_find_node(node_name);
     if (!bs) {
@@ -41,12 +40,7 @@ void qmp_block_set_write_threshold(const char *node_name,
         return;
     }
 
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-
     bdrv_write_threshold_set(bs, threshold_bytes);
-
-    aio_context_release(aio_context);
 }
 
 void bdrv_write_threshold_check_write(BlockDriverState *bs, int64_t offset,

[blockdev.c: 320 lines changed (diff omitted because it is too large)]

[blockjob.c: 30 lines changed]

@@ -198,9 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
      * one to make sure that such a concurrent access does not attempt
      * to process an already freed BdrvChild.
      */
-    aio_context_release(job->job.aio_context);
-    bdrv_graph_wrlock(NULL);
-    aio_context_acquire(job->job.aio_context);
+    bdrv_graph_wrlock();
     while (job->nodes) {
         GSList *l = job->nodes;
         BdrvChild *c = l->data;
@@ -212,7 +210,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
 
         g_slist_free_1(l);
     }
-    bdrv_graph_wrunlock_ctx(job->job.aio_context);
+    bdrv_graph_wrunlock();
 }
 
 bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
@@ -234,28 +232,12 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                        uint64_t perm, uint64_t shared_perm, Error **errp)
 {
     BdrvChild *c;
-    AioContext *ctx = bdrv_get_aio_context(bs);
-    bool need_context_ops;
     GLOBAL_STATE_CODE();
 
     bdrv_ref(bs);
 
-    need_context_ops = ctx != job->job.aio_context;
-
-    if (need_context_ops) {
-        if (job->job.aio_context != qemu_get_aio_context()) {
-            aio_context_release(job->job.aio_context);
-        }
-        aio_context_acquire(ctx);
-    }
     c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                                errp);
-    if (need_context_ops) {
-        aio_context_release(ctx);
-        if (job->job.aio_context != qemu_get_aio_context()) {
-            aio_context_acquire(job->job.aio_context);
-        }
-    }
     if (c == NULL) {
         return -EPERM;
     }
@@ -514,7 +496,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     int ret;
     GLOBAL_STATE_CODE();
 
-    bdrv_graph_wrlock(bs);
+    bdrv_graph_wrlock();
 
     if (job_id == NULL && !(flags & JOB_INTERNAL)) {
         job_id = bdrv_get_device_name(bs);
@@ -523,7 +505,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
                      flags, cb, opaque, errp);
     if (job == NULL) {
-        bdrv_graph_wrunlock(bs);
+        bdrv_graph_wrunlock();
         return NULL;
     }
 
@@ -563,11 +545,11 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
         goto fail;
     }
 
-    bdrv_graph_wrunlock(bs);
+    bdrv_graph_wrunlock();
     return job;
 
 fail:
-    bdrv_graph_wrunlock(bs);
+    bdrv_graph_wrunlock();
     job_early_fail(&job->job);
     return NULL;
 }

@@ -88,27 +88,18 @@ loop, depending on which AioContext instance the caller passes in.
 
 How to synchronize with an IOThread
 -----------------------------------
-AioContext is not thread-safe so some rules must be followed when using file
-descriptors, event notifiers, timers, or BHs across threads:
+Variables that can be accessed by multiple threads require some form of
+synchronization such as qemu_mutex_lock(), rcu_read_lock(), etc.
 
-1. AioContext functions can always be called safely.  They handle their
-own locking internally.
-
-2. Other threads wishing to access the AioContext must use
-aio_context_acquire()/aio_context_release() for mutual exclusion.  Once the
-context is acquired no other thread can access it or run event loop iterations
-in this AioContext.
-
-Legacy code sometimes nests aio_context_acquire()/aio_context_release() calls.
-Do not use nesting anymore, it is incompatible with the BDRV_POLL_WHILE() macro
-used in the block layer and can lead to hangs.
-
-There is currently no lock ordering rule if a thread needs to acquire multiple
-AioContexts simultaneously.  Therefore, it is only safe for code holding the
-QEMU global mutex to acquire other AioContexts.
+AioContext functions like aio_set_fd_handler(), aio_set_event_notifier(),
+aio_bh_new(), and aio_timer_new() are thread-safe. They can be used to trigger
+activity in an IOThread.
 
 Side note: the best way to schedule a function call across threads is to call
-aio_bh_schedule_oneshot(). No acquire/release or locking is needed.
+aio_bh_schedule_oneshot().
 
 The main loop thread can wait synchronously for a condition using
 AIO_WAIT_WHILE().
 
 AioContext and the block layer
 ------------------------------
@@ -124,22 +115,16 @@ Block layer code must therefore expect to run in an IOThread and avoid using
 old APIs that implicitly use the main loop.  See the "How to program for
 IOThreads" above for information on how to do that.
 
-If main loop code such as a QMP function wishes to access a BlockDriverState
-it must first call aio_context_acquire(bdrv_get_aio_context(bs)) to ensure
-that callbacks in the IOThread do not run in parallel.
-
 Code running in the monitor typically needs to ensure that past
 requests from the guest are completed.  When a block device is running
 in an IOThread, the IOThread can also process requests from the guest
 (via ioeventfd).  To achieve both objects, wrap the code between
 bdrv_drained_begin() and bdrv_drained_end(), thus creating a "drained
-section".  The functions must be called between aio_context_acquire()
-and aio_context_release().  You can freely release and re-acquire the
-AioContext within a drained section.
+section".
 
-Long-running jobs (usually in the form of coroutines) are best scheduled in
-the BlockDriverState's AioContext to avoid the need to acquire/release around
-each bdrv_*() call.  The functions bdrv_add/remove_aio_context_notifier,
-or alternatively blk_add/remove_aio_context_notifier if you use BlockBackends,
-can be used to get a notification whenever bdrv_try_change_aio_context() moves a
+Long-running jobs (usually in the form of coroutines) are often scheduled in
+the BlockDriverState's AioContext.  The functions
+bdrv_add/remove_aio_context_notifier, or alternatively
+blk_add/remove_aio_context_notifier if you use BlockBackends, can be used to
+get a notification whenever bdrv_try_change_aio_context() moves a
 BlockDriverState to a different AioContext.
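
As a concrete illustration of the rewritten documentation above, triggering work in an IOThread needs no acquire/release at all. The sketch below is illustrative only; my_cb, my_state and kick_iothread are invented names, and QEMU's AioContext and IOThread headers are assumed.

    /*
     * Sketch: run a callback once in an IOThread's AioContext, with no
     * aio_context_acquire()/release() around it.
     */
    static void my_cb(void *opaque)
    {
        /* Runs in the IOThread; protect shared data with a lock or RCU. */
    }

    static void kick_iothread(IOThread *iothread, void *my_state)
    {
        AioContext *ctx = iothread_get_aio_context(iothread);

        aio_bh_schedule_oneshot(ctx, my_cb, my_state);  /* thread-safe */
    }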
@ -32,13 +32,11 @@ struct VirtIOBlockDataPlane {
|
|||
VirtIOBlkConf *conf;
|
||||
VirtIODevice *vdev;
|
||||
|
||||
/* Note that these EventNotifiers are assigned by value. This is
|
||||
* fine as long as you do not call event_notifier_cleanup on them
|
||||
* (because you don't own the file descriptor or handle; you just
|
||||
* use it).
|
||||
/*
|
||||
* The AioContext for each virtqueue. The BlockDriverState will use the
|
||||
* first element as its AioContext.
|
||||
*/
|
||||
IOThread *iothread;
|
||||
AioContext *ctx;
|
||||
AioContext **vq_aio_context;
|
||||
};
|
||||
|
||||
/* Raise an interrupt to signal guest, if necessary */
|
||||
|
@ -47,6 +45,45 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
|
|||
virtio_notify_irqfd(s->vdev, vq);
|
||||
}
|
||||
|
||||
/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
|
||||
static void
|
||||
apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
|
||||
AioContext **vq_aio_context, uint16_t num_queues)
|
||||
{
|
||||
IOThreadVirtQueueMappingList *node;
|
||||
size_t num_iothreads = 0;
|
||||
size_t cur_iothread = 0;
|
||||
|
||||
for (node = iothread_vq_mapping_list; node; node = node->next) {
|
||||
num_iothreads++;
|
||||
}
|
||||
|
||||
for (node = iothread_vq_mapping_list; node; node = node->next) {
|
||||
IOThread *iothread = iothread_by_id(node->value->iothread);
|
||||
AioContext *ctx = iothread_get_aio_context(iothread);
|
||||
|
||||
/* Released in virtio_blk_data_plane_destroy() */
|
||||
object_ref(OBJECT(iothread));
|
||||
|
||||
if (node->value->vqs) {
|
||||
uint16List *vq;
|
||||
|
||||
/* Explicit vq:IOThread assignment */
|
||||
for (vq = node->value->vqs; vq; vq = vq->next) {
|
||||
vq_aio_context[vq->value] = ctx;
|
||||
}
|
||||
} else {
|
||||
/* Round-robin vq:IOThread assignment */
|
||||
for (unsigned i = cur_iothread; i < num_queues;
|
||||
i += num_iothreads) {
|
||||
vq_aio_context[i] = ctx;
|
||||
}
|
||||
}
|
||||
|
||||
cur_iothread++;
|
||||
}
|
||||
}
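(Illustration, not part of the patch: with num_queues = 4 and two mapping entries
that omit "vqs", the round-robin branch above starts each IOThread at its own
index and strides by the number of IOThreads, so the first IOThread is assigned
virtqueues 0 and 2 and the second IOThread is assigned virtqueues 1 and 3.)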

/* Context: QEMU global mutex held */
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,

@@ -58,7 +95,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,

    *dataplane = NULL;

    if (conf->iothread) {
    if (conf->iothread || conf->iothread_vq_mapping_list) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "

@@ -86,13 +123,24 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;
    s->vq_aio_context = g_new(AioContext *, conf->num_queues);

    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
        s->ctx = iothread_get_aio_context(s->iothread);
    if (conf->iothread_vq_mapping_list) {
        apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
                         conf->num_queues);
    } else if (conf->iothread) {
        AioContext *ctx = iothread_get_aio_context(conf->iothread);
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }

        /* Released in virtio_blk_data_plane_destroy() */
        object_ref(OBJECT(conf->iothread));
    } else {
        s->ctx = qemu_get_aio_context();
        AioContext *ctx = qemu_get_aio_context();
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }
    }

    *dataplane = s;

@@ -104,6 +152,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    VirtIOBlock *vblk;
    VirtIOBlkConf *conf = s->conf;

    if (!s) {
        return;

@@ -111,9 +160,21 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)

    vblk = VIRTIO_BLK(s->vdev);
    assert(!vblk->dataplane_started);
    if (s->iothread) {
        object_unref(OBJECT(s->iothread));

    if (conf->iothread_vq_mapping_list) {
        IOThreadVirtQueueMappingList *node;

        for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
            IOThread *iothread = iothread_by_id(node->value->iothread);
            object_unref(OBJECT(iothread));
        }
    }

    if (conf->iothread) {
        object_unref(OBJECT(conf->iothread));
    }

    g_free(s->vq_aio_context);
    g_free(s);
}

@@ -124,7 +185,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    AioContext *old_context;
    unsigned i;
    unsigned nvqs = s->conf->num_queues;
    Error *local_err = NULL;

@@ -178,22 +238,13 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)

    trace_virtio_blk_data_plane_start(s);

    old_context = blk_get_aio_context(s->conf->conf.blk);
    aio_context_acquire(old_context);
    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
    aio_context_release(old_context);
    r = blk_set_aio_context(s->conf->conf.blk, s->vq_aio_context[0],
                            &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail_aio_context;
    }

    /* Kick right away to begin processing requests already in vring */
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        event_notifier_set(virtio_queue_get_host_notifier(vq));
    }

    /*
     * These fields must be visible to the IOThread when it processes the
     * virtqueue, otherwise it will think dataplane has not started yet.

@@ -208,13 +259,15 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)

    /* Get this show started by hooking up our callbacks */
    if (!blk_in_drain(s->conf->conf.blk)) {
        aio_context_acquire(s->ctx);
        for (i = 0; i < nvqs; i++) {
            VirtQueue *vq = virtio_get_queue(s->vdev, i);
            AioContext *ctx = s->vq_aio_context[i];

            virtio_queue_aio_attach_host_notifier(vq, s->ctx);
            /* Kick right away to begin processing requests already in vring */
            event_notifier_set(virtio_queue_get_host_notifier(vq));

            virtio_queue_aio_attach_host_notifier(vq, ctx);
        }
        aio_context_release(s->ctx);
    }
    return 0;

@@ -242,23 +295,18 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
 *
 * Context: BH in IOThread
 */
static void virtio_blk_data_plane_stop_bh(void *opaque)
static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned i;
    VirtQueue *vq = opaque;
    EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);

    for (i = 0; i < s->conf->num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);
        EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);
    virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());

        virtio_queue_aio_detach_host_notifier(vq, s->ctx);

        /*
         * Test and clear notifier after disabling event, in case poll callback
         * didn't have time to run.
         */
        virtio_queue_host_notifier_read(host_notifier);
    }
    /*
     * Test and clear notifier after disabling event, in case poll callback
     * didn't have time to run.
     */
    virtio_queue_host_notifier_read(host_notifier);
}

/* Context: QEMU global mutex held */

@@ -285,7 +333,12 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
    trace_virtio_blk_data_plane_stop(s);

    if (!blk_in_drain(s->conf->conf.blk)) {
        aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
        for (i = 0; i < nvqs; i++) {
            VirtQueue *vq = virtio_get_queue(s->vdev, i);
            AioContext *ctx = s->vq_aio_context[i];

            aio_wait_bh_oneshot(ctx, virtio_blk_data_plane_stop_vq_bh, vq);
        }
    }

    /*

@@ -314,8 +367,6 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     */
    vblk->dataplane_started = false;

    aio_context_acquire(s->ctx);

    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
    blk_drain(s->conf->conf.blk);


@@ -325,10 +376,28 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);

    aio_context_release(s->ctx);

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    s->stopping = false;
}

void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev);

    for (uint16_t i = 0; i < s->conf->num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev);

    for (uint16_t i = 0; i < s->conf->num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
    }
}


@@ -28,4 +28,7 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
int virtio_blk_data_plane_start(VirtIODevice *vdev);
void virtio_blk_data_plane_stop(VirtIODevice *vdev);

void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s);

#endif /* HW_DATAPLANE_VIRTIO_BLK_H */

@@ -260,8 +260,6 @@ static void xen_block_complete_aio(void *opaque, int ret)
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    aio_context_acquire(dataplane->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?

@@ -273,10 +271,10 @@ static void xen_block_complete_aio(void *opaque, int ret)
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        goto done;
        return;
    }
    if (request->aio_inflight > 0) {
        goto done;
        return;
    }

    switch (request->req.operation) {

@@ -318,9 +316,6 @@ static void xen_block_complete_aio(void *opaque, int ret)
    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }

done:
    aio_context_release(dataplane->ctx);
}

static bool xen_block_split_discard(XenBlockRequest *request,

@@ -601,9 +596,7 @@ static void xen_block_dataplane_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    aio_context_acquire(dataplane->ctx);
    xen_block_handle_requests(dataplane);
    aio_context_release(dataplane->ctx);
}

static bool xen_block_dataplane_event(void *opaque)

@@ -703,10 +696,8 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
        xen_block_dataplane_detach(dataplane);
    }

    aio_context_acquire(dataplane->ctx);
    /* Xen doesn't have multiple users for nodes, so this can't fail */
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(dataplane->ctx);

    /*
     * Now that the context has been moved onto the main thread, cancel

@@ -752,7 +743,6 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
{
    ERRP_GUARD();
    XenDevice *xendev = dataplane->xendev;
    AioContext *old_context;
    unsigned int ring_size;
    unsigned int i;

@@ -836,11 +826,8 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
        goto stop;
    }

    old_context = blk_get_aio_context(dataplane->blk);
    aio_context_acquire(old_context);
    /* If other users keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
    aio_context_release(old_context);

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_attach(dataplane);

@@ -82,8 +82,11 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;
        req->next = s->rq;
        s->rq = req;

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {

@@ -102,7 +105,6 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;

@@ -135,7 +137,6 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
            block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

static void virtio_blk_flush_complete(void *opaque, int ret)

@@ -143,19 +144,13 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0, true)) {
            goto out;
        }
    if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)

@@ -165,11 +160,8 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
            goto out;
        }
    if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);

@@ -177,9 +169,6 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

#ifdef __linux__
|
@ -226,10 +215,8 @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
|
|||
virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
|
||||
|
||||
out:
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
|
||||
virtio_blk_req_complete(req, status);
|
||||
virtio_blk_free_request(req);
|
||||
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
|
||||
g_free(ioctl_req);
|
||||
}
|
||||
|
||||
|
@ -669,7 +656,6 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
|
|||
{
|
||||
ZoneCmdData *data = opaque;
|
||||
VirtIOBlockReq *req = data->req;
|
||||
VirtIOBlock *s = req->dev;
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
|
||||
struct iovec *in_iov = data->in_iov;
|
||||
unsigned in_num = data->in_num;
|
||||
|
@ -760,10 +746,8 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
|
|||
}
|
||||
|
||||
out:
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
|
||||
virtio_blk_req_complete(req, err_status);
|
||||
virtio_blk_free_request(req);
|
||||
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
|
||||
g_free(data->zone_report_data.zones);
|
||||
g_free(data);
|
||||
}
|
||||
|
@ -826,10 +810,8 @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
|
|||
err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
|
||||
}
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
|
||||
virtio_blk_req_complete(req, err_status);
|
||||
virtio_blk_free_request(req);
|
||||
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
|
||||
}
|
||||
|
||||
static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
|
||||
|
@ -879,7 +861,6 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret)
|
|||
{
|
||||
ZoneCmdData *data = opaque;
|
||||
VirtIOBlockReq *req = data->req;
|
||||
VirtIOBlock *s = req->dev;
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
|
||||
int64_t append_sector, n;
|
||||
uint8_t err_status = VIRTIO_BLK_S_OK;
|
||||
|
@ -902,10 +883,8 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret)
|
|||
trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);
|
||||
|
||||
out:
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
|
||||
virtio_blk_req_complete(req, err_status);
|
||||
virtio_blk_free_request(req);
|
||||
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
|
||||
g_free(data);
|
||||
}
|
||||
|
||||
|
@ -941,10 +920,8 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
|
|||
return 0;
|
||||
|
||||
out:
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
|
||||
virtio_blk_req_complete(req, err_status);
|
||||
virtio_blk_free_request(req);
|
||||
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
|
||||
return err_status;
|
||||
}
|
||||
|
||||
|
@ -1134,7 +1111,6 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
|
|||
MultiReqBuffer mrb = {};
|
||||
bool suppress_notifications = virtio_queue_get_notification(vq);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->blk));
|
||||
defer_call_begin();
|
||||
|
||||
do {
|
||||
|
@ -1160,7 +1136,6 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
|
|||
}
|
||||
|
||||
defer_call_end();
|
||||
aio_context_release(blk_get_aio_context(s->blk));
|
||||
}
|
||||
|
||||
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
|
||||
|
@ -1176,6 +1151,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
|
|||
return;
|
||||
}
|
||||
}
|
||||
|
||||
virtio_blk_handle_vq(s, vq);
|
||||
}
|
||||
|
||||
|
@ -1183,12 +1159,14 @@ static void virtio_blk_dma_restart_bh(void *opaque)
|
|||
{
|
||||
VirtIOBlock *s = opaque;
|
||||
|
||||
VirtIOBlockReq *req = s->rq;
|
||||
VirtIOBlockReq *req;
|
||||
MultiReqBuffer mrb = {};
|
||||
|
||||
s->rq = NULL;
|
||||
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
|
||||
req = s->rq;
|
||||
s->rq = NULL;
|
||||
}
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
|
||||
while (req) {
|
||||
VirtIOBlockReq *next = req->next;
|
||||
if (virtio_blk_handle_request(req, &mrb)) {
|
||||
|
@ -1212,8 +1190,6 @@ static void virtio_blk_dma_restart_bh(void *opaque)
|
|||
|
||||
/* Paired with inc in virtio_blk_dma_restart_cb() */
|
||||
blk_dec_in_flight(s->conf.conf.blk);
|
||||
|
||||
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
|
||||
}
|
||||
|
||||
static void virtio_blk_dma_restart_cb(void *opaque, bool running,
|
||||
|
@ -1235,25 +1211,28 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
|
|||
static void virtio_blk_reset(VirtIODevice *vdev)
|
||||
{
|
||||
VirtIOBlock *s = VIRTIO_BLK(vdev);
|
||||
AioContext *ctx;
|
||||
VirtIOBlockReq *req;
|
||||
|
||||
ctx = blk_get_aio_context(s->blk);
|
||||
aio_context_acquire(ctx);
|
||||
/* Dataplane has stopped... */
|
||||
assert(!s->dataplane_started);
|
||||
|
||||
/* ...but requests may still be in flight. */
|
||||
blk_drain(s->blk);
|
||||
|
||||
/* We drop queued requests after blk_drain() because blk_drain() itself can
|
||||
* produce them. */
|
||||
while (s->rq) {
|
||||
req = s->rq;
|
||||
s->rq = req->next;
|
||||
virtqueue_detach_element(req->vq, &req->elem, 0);
|
||||
virtio_blk_free_request(req);
|
||||
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
|
||||
while (s->rq) {
|
||||
req = s->rq;
|
||||
s->rq = req->next;
|
||||
|
||||
/* No other threads can access req->vq here */
|
||||
virtqueue_detach_element(req->vq, &req->elem, 0);
|
||||
|
||||
virtio_blk_free_request(req);
|
||||
}
|
||||
}
|
||||
|
||||
aio_context_release(ctx);
|
||||
|
||||
assert(!s->dataplane_started);
|
||||
blk_set_enable_write_cache(s->blk, s->original_wce);
|
||||
}
|
||||
|
||||
|
@ -1268,10 +1247,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
|
|||
uint64_t capacity;
|
||||
int64_t length;
|
||||
int blk_size = conf->logical_block_size;
|
||||
AioContext *ctx;
|
||||
|
||||
ctx = blk_get_aio_context(s->blk);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
blk_get_geometry(s->blk, &capacity);
|
||||
memset(&blkcfg, 0, sizeof(blkcfg));
|
||||
|
@ -1295,7 +1270,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
|
|||
* per track (cylinder).
|
||||
*/
|
||||
length = blk_getlength(s->blk);
|
||||
aio_context_release(ctx);
|
||||
if (length > 0 && length / conf->heads / conf->secs % blk_size) {
|
||||
blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
|
||||
} else {
|
||||
|
@ -1362,9 +1336,7 @@ static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
|
|||
|
||||
memcpy(&blkcfg, config, s->config_size);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->blk));
|
||||
blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
|
||||
aio_context_release(blk_get_aio_context(s->blk));
|
||||
}
|
||||
|
||||
static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
|
||||
|
@ -1432,29 +1404,31 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
|
|||
* s->blk would erroneously be placed in writethrough mode.
|
||||
*/
|
||||
if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
|
||||
aio_context_acquire(blk_get_aio_context(s->blk));
|
||||
blk_set_enable_write_cache(s->blk,
|
||||
virtio_vdev_has_feature(vdev,
|
||||
VIRTIO_BLK_F_WCE));
|
||||
aio_context_release(blk_get_aio_context(s->blk));
|
||||
}
|
||||
}
|
||||
|
||||
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
|
||||
{
|
||||
VirtIOBlock *s = VIRTIO_BLK(vdev);
|
||||
VirtIOBlockReq *req = s->rq;
|
||||
|
||||
while (req) {
|
||||
qemu_put_sbyte(f, 1);
|
||||
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
|
||||
VirtIOBlockReq *req = s->rq;
|
||||
|
||||
if (s->conf.num_queues > 1) {
|
||||
qemu_put_be32(f, virtio_get_queue_index(req->vq));
|
||||
while (req) {
|
||||
qemu_put_sbyte(f, 1);
|
||||
|
||||
if (s->conf.num_queues > 1) {
|
||||
qemu_put_be32(f, virtio_get_queue_index(req->vq));
|
||||
}
|
||||
|
||||
qemu_put_virtqueue_element(vdev, f, &req->elem);
|
||||
req = req->next;
|
||||
}
|
||||
|
||||
qemu_put_virtqueue_element(vdev, f, &req->elem);
|
||||
req = req->next;
|
||||
}
|
||||
|
||||
qemu_put_sbyte(f, 0);
|
||||
}
|
||||
|
||||
|
@ -1480,13 +1454,78 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
|
|||
|
||||
req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
|
||||
virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
|
||||
req->next = s->rq;
|
||||
s->rq = req;
|
||||
|
||||
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
|
||||
req->next = s->rq;
|
||||
s->rq = req;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool
|
||||
validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
|
||||
uint16_t num_queues, Error **errp)
|
||||
{
|
||||
g_autofree unsigned long *vqs = bitmap_new(num_queues);
|
||||
g_autoptr(GHashTable) iothreads =
|
||||
g_hash_table_new(g_str_hash, g_str_equal);
|
||||
|
||||
for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
|
||||
const char *name = node->value->iothread;
|
||||
uint16List *vq;
|
||||
|
||||
if (!iothread_by_id(name)) {
|
||||
error_setg(errp, "IOThread \"%s\" object does not exist", name);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!g_hash_table_add(iothreads, (gpointer)name)) {
|
||||
error_setg(errp,
|
||||
"duplicate IOThread name \"%s\" in iothread-vq-mapping",
|
||||
name);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (node != list) {
|
||||
if (!!node->value->vqs != !!list->value->vqs) {
|
||||
error_setg(errp, "either all items in iothread-vq-mapping "
|
||||
"must have vqs or none of them must have it");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
for (vq = node->value->vqs; vq; vq = vq->next) {
|
||||
if (vq->value >= num_queues) {
|
||||
error_setg(errp, "vq index %u for IOThread \"%s\" must be "
|
||||
"less than num_queues %u in iothread-vq-mapping",
|
||||
vq->value, name, num_queues);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (test_and_set_bit(vq->value, vqs)) {
|
||||
error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
|
||||
"because it is already assigned", vq->value, name);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (list->value->vqs) {
|
||||
for (uint16_t i = 0; i < num_queues; i++) {
|
||||
if (!test_bit(i, vqs)) {
|
||||
error_setg(errp,
|
||||
"missing vq %u IOThread assignment in iothread-vq-mapping",
|
||||
i);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void virtio_resize_cb(void *opaque)
|
||||
{
|
||||
VirtIODevice *vdev = opaque;
|
||||
|
@ -1511,34 +1550,24 @@ static void virtio_blk_resize(void *opaque)
|
|||
static void virtio_blk_drained_begin(void *opaque)
|
||||
{
|
||||
VirtIOBlock *s = opaque;
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
|
||||
AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
|
||||
|
||||
if (!s->dataplane || !s->dataplane_started) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (uint16_t i = 0; i < s->conf.num_queues; i++) {
|
||||
VirtQueue *vq = virtio_get_queue(vdev, i);
|
||||
virtio_queue_aio_detach_host_notifier(vq, ctx);
|
||||
}
|
||||
virtio_blk_data_plane_detach(s->dataplane);
|
||||
}
|
||||
|
||||
/* Resume virtqueue ioeventfd processing after drain */
|
||||
static void virtio_blk_drained_end(void *opaque)
|
||||
{
|
||||
VirtIOBlock *s = opaque;
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
|
||||
AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
|
||||
|
||||
if (!s->dataplane || !s->dataplane_started) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (uint16_t i = 0; i < s->conf.num_queues; i++) {
|
||||
VirtQueue *vq = virtio_get_queue(vdev, i);
|
||||
virtio_queue_aio_attach_host_notifier(vq, ctx);
|
||||
}
|
||||
virtio_blk_data_plane_attach(s->dataplane);
|
||||
}
|
||||
|
||||
static const BlockDevOps virtio_block_ops = {
|
||||
|
@ -1624,10 +1653,25 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
|
|||
return;
|
||||
}
|
||||
|
||||
if (conf->iothread_vq_mapping_list) {
|
||||
if (conf->iothread) {
|
||||
error_setg(errp, "iothread and iothread-vq-mapping properties "
|
||||
"cannot be set at the same time");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
|
||||
conf->num_queues, errp)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
|
||||
s->host_features);
|
||||
virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
|
||||
|
||||
qemu_mutex_init(&s->rq_lock);
|
||||
|
||||
s->blk = conf->conf.blk;
|
||||
s->rq = NULL;
|
||||
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
|
||||
|
@ -1679,6 +1723,7 @@ static void virtio_blk_device_unrealize(DeviceState *dev)
|
|||
virtio_del_queue(vdev, i);
|
||||
}
|
||||
qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
|
||||
qemu_mutex_destroy(&s->rq_lock);
|
||||
blk_ram_registrar_destroy(&s->blk_ram_registrar);
|
||||
qemu_del_vm_change_state_handler(s->change);
|
||||
blockdev_mark_auto_del(s->blk);
|
||||
|
@ -1723,6 +1768,8 @@ static Property virtio_blk_properties[] = {
|
|||
DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
|
||||
DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
|
||||
IOThread *),
|
||||
DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
|
||||
conf.iothread_vq_mapping_list),
|
||||
DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
|
||||
VIRTIO_BLK_F_DISCARD, true),
|
||||
DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "qapi/qapi-types-block.h"
|
||||
#include "qapi/qapi-types-machine.h"
|
||||
#include "qapi/qapi-types-migration.h"
|
||||
#include "qapi/qapi-visit-virtio.h"
|
||||
#include "qapi/qmp/qerror.h"
|
||||
#include "qemu/ctype.h"
|
||||
#include "qemu/cutils.h"
|
||||
|
@ -120,9 +121,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name,
|
|||
"node");
|
||||
}
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
blk_replace_bs(blk, bs, errp);
|
||||
aio_context_release(ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -148,10 +147,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name,
|
|||
0, BLK_PERM_ALL);
|
||||
blk_created = true;
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
ret = blk_insert_bs(blk, bs, errp);
|
||||
aio_context_release(ctx);
|
||||
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
@ -207,12 +203,8 @@ static void release_drive(Object *obj, const char *name, void *opaque)
|
|||
BlockBackend **ptr = object_field_prop_ptr(obj, prop);
|
||||
|
||||
if (*ptr) {
|
||||
AioContext *ctx = blk_get_aio_context(*ptr);
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
blockdev_auto_del(*ptr);
|
||||
blk_detach_dev(*ptr, dev);
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1169,3 +1161,48 @@ const PropertyInfo qdev_prop_cpus390entitlement = {
|
|||
.set = qdev_propinfo_set_enum,
|
||||
.set_default_value = qdev_propinfo_set_default_value_enum,
|
||||
};
|
||||
|
||||
/* --- IOThreadVirtQueueMappingList --- */
|
||||
|
||||
static void get_iothread_vq_mapping_list(Object *obj, Visitor *v,
|
||||
const char *name, void *opaque, Error **errp)
|
||||
{
|
||||
IOThreadVirtQueueMappingList **prop_ptr =
|
||||
object_field_prop_ptr(obj, opaque);
|
||||
|
||||
visit_type_IOThreadVirtQueueMappingList(v, name, prop_ptr, errp);
|
||||
}
|
||||
|
||||
static void set_iothread_vq_mapping_list(Object *obj, Visitor *v,
|
||||
const char *name, void *opaque, Error **errp)
|
||||
{
|
||||
IOThreadVirtQueueMappingList **prop_ptr =
|
||||
object_field_prop_ptr(obj, opaque);
|
||||
IOThreadVirtQueueMappingList *list;
|
||||
|
||||
if (!visit_type_IOThreadVirtQueueMappingList(v, name, &list, errp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
qapi_free_IOThreadVirtQueueMappingList(*prop_ptr);
|
||||
*prop_ptr = list;
|
||||
}
|
||||
|
||||
static void release_iothread_vq_mapping_list(Object *obj,
|
||||
const char *name, void *opaque)
|
||||
{
|
||||
IOThreadVirtQueueMappingList **prop_ptr =
|
||||
object_field_prop_ptr(obj, opaque);
|
||||
|
||||
qapi_free_IOThreadVirtQueueMappingList(*prop_ptr);
|
||||
*prop_ptr = NULL;
|
||||
}
|
||||
|
||||
const PropertyInfo qdev_prop_iothread_vq_mapping_list = {
|
||||
.name = "IOThreadVirtQueueMappingList",
|
||||
.description = "IOThread virtqueue mapping list [{\"iothread\":\"<id>\", "
|
||||
"\"vqs\":[1,2,3,...]},...]",
|
||||
.get = get_iothread_vq_mapping_list,
|
||||
.set = set_iothread_vq_mapping_list,
|
||||
.release = release_iothread_vq_mapping_list,
|
||||
};
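(Aside, not part of the patch: going by the description string above, a mapping
could be specified on the command line roughly as follows. The IOThread and
drive ids are hypothetical and the exact -device JSON spelling may vary between
QEMU versions.)

    -object iothread,id=iot0 -object iothread,id=iot1 \
    -device '{"driver":"virtio-blk-pci","drive":"drive0",
              "iothread-vq-mapping":[{"iothread":"iot0","vqs":[0,2]},
                                     {"iothread":"iot1","vqs":[1,3]}]}'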
|
||||
|
|
|
@@ -1076,16 +1076,18 @@ void device_class_set_props(DeviceClass *dc, Property *props)
void qdev_alias_all_properties(DeviceState *target, Object *source)
{
    ObjectClass *class;
    Property *prop;
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    class = object_get_class(OBJECT(target));
    do {
        DeviceClass *dc = DEVICE_CLASS(class);

        for (prop = dc->props_; prop && prop->name; prop++) {
            object_property_add_alias(source, prop->name,
                                      OBJECT(target), prop->name);
    object_class_property_iter_init(&iter, class);
    while ((prop = object_property_iter_next(&iter))) {
        if (object_property_find(source, prop->name)) {
            continue; /* skip duplicate properties */
        }
        class = object_class_get_parent(class);
    } while (class != object_class_by_name(TYPE_DEVICE));

        object_property_add_alias(source, prop->name,
                                  OBJECT(target), prop->name);
    }
}

|
@ -85,6 +85,89 @@ SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
|
|||
return d;
|
||||
}
|
||||
|
||||
/*
|
||||
* Invoke @fn() for each enqueued request in device @s. Must be called from the
|
||||
* main loop thread while the guest is stopped. This is only suitable for
|
||||
* vmstate ->put(), use scsi_device_for_each_req_async() for other cases.
|
||||
*/
|
||||
static void scsi_device_for_each_req_sync(SCSIDevice *s,
|
||||
void (*fn)(SCSIRequest *, void *),
|
||||
void *opaque)
|
||||
{
|
||||
SCSIRequest *req;
|
||||
SCSIRequest *next_req;
|
||||
|
||||
assert(!runstate_is_running());
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
|
||||
fn(req, opaque);
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
SCSIDevice *s;
|
||||
void (*fn)(SCSIRequest *, void *);
|
||||
void *fn_opaque;
|
||||
} SCSIDeviceForEachReqAsyncData;
|
||||
|
||||
static void scsi_device_for_each_req_async_bh(void *opaque)
|
||||
{
|
||||
g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
|
||||
SCSIDevice *s = data->s;
|
||||
AioContext *ctx;
|
||||
SCSIRequest *req;
|
||||
SCSIRequest *next;
|
||||
|
||||
/*
|
||||
* If the AioContext changed before this BH was called then reschedule into
|
||||
* the new AioContext before accessing ->requests. This can happen when
|
||||
* scsi_device_for_each_req_async() is called and then the AioContext is
|
||||
* changed before BHs are run.
|
||||
*/
|
||||
ctx = blk_get_aio_context(s->conf.blk);
|
||||
if (ctx != qemu_get_current_aio_context()) {
|
||||
aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh,
|
||||
g_steal_pointer(&data));
|
||||
return;
|
||||
}
|
||||
|
||||
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
|
||||
data->fn(req, data->fn_opaque);
|
||||
}
|
||||
|
||||
/* Drop the reference taken by scsi_device_for_each_req_async() */
|
||||
object_unref(OBJECT(s));
|
||||
}
|
||||
|
||||
/*
|
||||
* Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
|
||||
* runs in the AioContext that is executing the request.
|
||||
*/
|
||||
static void scsi_device_for_each_req_async(SCSIDevice *s,
|
||||
void (*fn)(SCSIRequest *, void *),
|
||||
void *opaque)
|
||||
{
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
SCSIDeviceForEachReqAsyncData *data =
|
||||
g_new(SCSIDeviceForEachReqAsyncData, 1);
|
||||
|
||||
data->s = s;
|
||||
data->fn = fn;
|
||||
data->fn_opaque = opaque;
|
||||
|
||||
/*
|
||||
* Hold a reference to the SCSIDevice until
|
||||
* scsi_device_for_each_req_async_bh() finishes.
|
||||
*/
|
||||
object_ref(OBJECT(s));
|
||||
|
||||
aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
|
||||
scsi_device_for_each_req_async_bh,
|
||||
data);
|
||||
}
|
||||
|
||||
static void scsi_device_realize(SCSIDevice *s, Error **errp)
|
||||
{
|
||||
SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
|
||||
|
@ -144,20 +227,18 @@ void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
|
|||
qbus_set_bus_hotplug_handler(BUS(bus));
|
||||
}
|
||||
|
||||
static void scsi_dma_restart_bh(void *opaque)
|
||||
void scsi_req_retry(SCSIRequest *req)
|
||||
{
|
||||
SCSIDevice *s = opaque;
|
||||
SCSIRequest *req, *next;
|
||||
req->retry = true;
|
||||
}
|
||||
|
||||
qemu_bh_delete(s->bh);
|
||||
s->bh = NULL;
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.blk));
|
||||
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
|
||||
scsi_req_ref(req);
|
||||
if (req->retry) {
|
||||
req->retry = false;
|
||||
switch (req->cmd.mode) {
|
||||
/* Called in the AioContext that is executing the request */
|
||||
static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
|
||||
{
|
||||
scsi_req_ref(req);
|
||||
if (req->retry) {
|
||||
req->retry = false;
|
||||
switch (req->cmd.mode) {
|
||||
case SCSI_XFER_FROM_DEV:
|
||||
case SCSI_XFER_TO_DEV:
|
||||
scsi_req_continue(req);
|
||||
|
@ -166,37 +247,22 @@ static void scsi_dma_restart_bh(void *opaque)
|
|||
scsi_req_dequeue(req);
|
||||
scsi_req_enqueue(req);
|
||||
break;
|
||||
}
|
||||
}
|
||||
scsi_req_unref(req);
|
||||
}
|
||||
aio_context_release(blk_get_aio_context(s->conf.blk));
|
||||
/* Drop the reference that was acquired in scsi_dma_restart_cb */
|
||||
object_unref(OBJECT(s));
|
||||
}
|
||||
|
||||
void scsi_req_retry(SCSIRequest *req)
|
||||
{
|
||||
/* No need to save a reference, because scsi_dma_restart_bh just
|
||||
* looks at the request list. */
|
||||
req->retry = true;
|
||||
scsi_req_unref(req);
|
||||
}
|
||||
|
||||
static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
|
||||
{
|
||||
SCSIDevice *s = opaque;
|
||||
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
if (!running) {
|
||||
return;
|
||||
}
|
||||
if (!s->bh) {
|
||||
AioContext *ctx = blk_get_aio_context(s->conf.blk);
|
||||
/* The reference is dropped in scsi_dma_restart_bh.*/
|
||||
object_ref(OBJECT(s));
|
||||
s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s,
|
||||
&DEVICE(s)->mem_reentrancy_guard);
|
||||
qemu_bh_schedule(s->bh);
|
||||
}
|
||||
|
||||
scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);
|
||||
}
|
||||
|
||||
static bool scsi_bus_is_address_free(SCSIBus *bus,
|
||||
|
@ -1657,17 +1723,16 @@ void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
|
|||
}
|
||||
}
|
||||
|
||||
static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
|
||||
{
|
||||
scsi_req_cancel_async(req, NULL);
|
||||
}
|
||||
|
||||
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
|
||||
{
|
||||
SCSIRequest *req;
|
||||
scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
|
||||
while (!QTAILQ_EMPTY(&sdev->requests)) {
|
||||
req = QTAILQ_FIRST(&sdev->requests);
|
||||
scsi_req_cancel_async(req, NULL);
|
||||
}
|
||||
blk_drain(sdev->conf.blk);
|
||||
aio_context_release(blk_get_aio_context(sdev->conf.blk));
|
||||
scsi_device_set_ua(sdev, sense);
|
||||
}
|
||||
|
||||
|
@ -1737,31 +1802,33 @@ static char *scsibus_get_fw_dev_path(DeviceState *dev)
|
|||
|
||||
/* SCSI request list. For simplicity, pv points to the whole device */
|
||||
|
||||
static void put_scsi_req(SCSIRequest *req, void *opaque)
|
||||
{
|
||||
QEMUFile *f = opaque;
|
||||
|
||||
assert(!req->io_canceled);
|
||||
assert(req->status == -1 && req->host_status == -1);
|
||||
assert(req->enqueued);
|
||||
|
||||
qemu_put_sbyte(f, req->retry ? 1 : 2);
|
||||
qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
|
||||
qemu_put_be32s(f, &req->tag);
|
||||
qemu_put_be32s(f, &req->lun);
|
||||
if (req->bus->info->save_request) {
|
||||
req->bus->info->save_request(f, req);
|
||||
}
|
||||
if (req->ops->save_request) {
|
||||
req->ops->save_request(f, req);
|
||||
}
|
||||
}
|
||||
|
||||
static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
|
||||
const VMStateField *field, JSONWriter *vmdesc)
|
||||
{
|
||||
SCSIDevice *s = pv;
|
||||
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
|
||||
SCSIRequest *req;
|
||||
|
||||
QTAILQ_FOREACH(req, &s->requests, next) {
|
||||
assert(!req->io_canceled);
|
||||
assert(req->status == -1 && req->host_status == -1);
|
||||
assert(req->enqueued);
|
||||
|
||||
qemu_put_sbyte(f, req->retry ? 1 : 2);
|
||||
qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
|
||||
qemu_put_be32s(f, &req->tag);
|
||||
qemu_put_be32s(f, &req->lun);
|
||||
if (bus->info->save_request) {
|
||||
bus->info->save_request(f, req);
|
||||
}
|
||||
if (req->ops->save_request) {
|
||||
req->ops->save_request(f, req);
|
||||
}
|
||||
}
|
||||
scsi_device_for_each_req_sync(s, put_scsi_req, f);
|
||||
qemu_put_sbyte(f, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -273,7 +273,9 @@ static void scsi_aio_complete(void *opaque, int ret)
|
|||
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
|
||||
/* The request must only run in the BlockBackend's AioContext */
|
||||
assert(blk_get_aio_context(s->qdev.conf.blk) ==
|
||||
qemu_get_current_aio_context());
|
||||
|
||||
assert(r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
@ -286,7 +288,6 @@ static void scsi_aio_complete(void *opaque, int ret)
|
|||
scsi_req_complete(&r->req, GOOD);
|
||||
|
||||
done:
|
||||
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
|
||||
scsi_req_unref(&r->req);
|
||||
}
|
||||
|
||||
|
@ -354,7 +355,6 @@ done:
|
|||
scsi_req_unref(&r->req);
|
||||
}
|
||||
|
||||
/* Called with AioContext lock held */
|
||||
static void scsi_dma_complete(void *opaque, int ret)
|
||||
{
|
||||
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
||||
|
@ -373,8 +373,13 @@ static void scsi_dma_complete(void *opaque, int ret)
|
|||
|
||||
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
|
||||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
uint32_t n;
|
||||
|
||||
/* The request must only run in the BlockBackend's AioContext */
|
||||
assert(blk_get_aio_context(s->qdev.conf.blk) ==
|
||||
qemu_get_current_aio_context());
|
||||
|
||||
assert(r->req.aiocb == NULL);
|
||||
if (scsi_disk_req_check_error(r, ret, false)) {
|
||||
goto done;
|
||||
|
@ -394,8 +399,6 @@ static void scsi_read_complete(void *opaque, int ret)
|
|||
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
|
||||
|
||||
assert(r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
|
@ -406,7 +409,6 @@ static void scsi_read_complete(void *opaque, int ret)
|
|||
trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
|
||||
}
|
||||
scsi_read_complete_noio(r, ret);
|
||||
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
|
||||
}
|
||||
|
||||
/* Actually issue a read to the block device. */
|
||||
|
@ -448,8 +450,6 @@ static void scsi_do_read_cb(void *opaque, int ret)
|
|||
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
|
||||
|
||||
assert (r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
|
@ -459,7 +459,6 @@ static void scsi_do_read_cb(void *opaque, int ret)
|
|||
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
|
||||
}
|
||||
scsi_do_read(opaque, ret);
|
||||
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
|
||||
}
|
||||
|
||||
/* Read more data from scsi device into buffer. */
|
||||
|
@ -505,8 +504,13 @@ static void scsi_read_data(SCSIRequest *req)
|
|||
|
||||
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
|
||||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
uint32_t n;
|
||||
|
||||
/* The request must only run in the BlockBackend's AioContext */
|
||||
assert(blk_get_aio_context(s->qdev.conf.blk) ==
|
||||
qemu_get_current_aio_context());
|
||||
|
||||
assert (r->req.aiocb == NULL);
|
||||
if (scsi_disk_req_check_error(r, ret, false)) {
|
||||
goto done;
|
||||
|
@ -533,8 +537,6 @@ static void scsi_write_complete(void * opaque, int ret)
|
|||
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
|
||||
|
||||
assert (r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
|
@ -544,7 +546,6 @@ static void scsi_write_complete(void * opaque, int ret)
|
|||
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
|
||||
}
|
||||
scsi_write_complete_noio(r, ret);
|
||||
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
|
||||
}
|
||||
|
||||
static void scsi_write_data(SCSIRequest *req)
|
||||
|
@ -1742,8 +1743,6 @@ static void scsi_unmap_complete(void *opaque, int ret)
|
|||
SCSIDiskReq *r = data->r;
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
|
||||
|
||||
assert(r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
|
@ -1754,7 +1753,6 @@ static void scsi_unmap_complete(void *opaque, int ret)
|
|||
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
|
||||
scsi_unmap_complete_noio(data, ret);
|
||||
}
|
||||
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
|
||||
}
|
||||
|
||||
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
|
||||
|
@ -1822,8 +1820,6 @@ static void scsi_write_same_complete(void *opaque, int ret)
|
|||
SCSIDiskReq *r = data->r;
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
|
||||
|
||||
assert(r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
|
@ -1847,7 +1843,6 @@ static void scsi_write_same_complete(void *opaque, int ret)
|
|||
data->sector << BDRV_SECTOR_BITS,
|
||||
&data->qiov, 0,
|
||||
scsi_write_same_complete, data);
|
||||
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1857,7 +1852,6 @@ done:
|
|||
scsi_req_unref(&r->req);
|
||||
qemu_vfree(data->iov.iov_base);
|
||||
g_free(data);
|
||||
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
|
||||
}
|
||||
|
||||
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
|
||||
|
@ -2344,14 +2338,10 @@ static void scsi_disk_reset(DeviceState *dev)
|
|||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
|
||||
uint64_t nb_sectors;
|
||||
AioContext *ctx;
|
||||
|
||||
scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
|
||||
|
||||
ctx = blk_get_aio_context(s->qdev.conf.blk);
|
||||
aio_context_acquire(ctx);
|
||||
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
|
||||
aio_context_release(ctx);
|
||||
|
||||
nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
|
||||
if (nb_sectors) {
|
||||
|
@ -2550,15 +2540,13 @@ static void scsi_unrealize(SCSIDevice *dev)
|
|||
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
|
||||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
|
||||
AioContext *ctx = NULL;
|
||||
|
||||
/* can happen for devices without drive. The error message for missing
|
||||
* backend will be issued in scsi_realize
|
||||
*/
|
||||
if (s->qdev.conf.blk) {
|
||||
ctx = blk_get_aio_context(s->qdev.conf.blk);
|
||||
aio_context_acquire(ctx);
|
||||
if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
}
|
||||
s->qdev.blocksize = s->qdev.conf.logical_block_size;
|
||||
|
@ -2567,16 +2555,11 @@ static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
|
|||
s->product = g_strdup("QEMU HARDDISK");
|
||||
}
|
||||
scsi_realize(&s->qdev, errp);
|
||||
out:
|
||||
if (ctx) {
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
|
||||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
|
||||
AioContext *ctx;
|
||||
int ret;
|
||||
uint32_t blocksize = 2048;
|
||||
|
||||
|
@ -2592,8 +2575,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
|
|||
blocksize = dev->conf.physical_block_size;
|
||||
}
|
||||
|
||||
ctx = blk_get_aio_context(dev->conf.blk);
|
||||
aio_context_acquire(ctx);
|
||||
s->qdev.blocksize = blocksize;
|
||||
s->qdev.type = TYPE_ROM;
|
||||
s->features |= 1 << SCSI_DISK_F_REMOVABLE;
|
||||
|
@ -2601,7 +2582,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
|
|||
s->product = g_strdup("QEMU CD-ROM");
|
||||
}
|
||||
scsi_realize(&s->qdev, errp);
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
|
||||
|
||||
|
@ -2732,7 +2712,6 @@ static int get_device_type(SCSIDiskState *s)
|
|||
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
|
||||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
|
||||
AioContext *ctx;
|
||||
int sg_version;
|
||||
int rc;
|
||||
|
||||
|
@ -2747,9 +2726,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
|
|||
"be removed in a future version");
|
||||
}
|
||||
|
||||
ctx = blk_get_aio_context(s->qdev.conf.blk);
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
/* check we are using a driver managing SG_IO (version 3 and after) */
|
||||
rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
|
||||
if (rc < 0) {
|
||||
|
@ -2757,18 +2733,18 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
|
|||
if (rc != -EPERM) {
|
||||
error_append_hint(errp, "Is this a SCSI device?\n");
|
||||
}
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
if (sg_version < 30000) {
|
||||
error_setg(errp, "scsi generic interface too old");
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/* get device type from INQUIRY data */
|
||||
rc = get_device_type(s);
|
||||
if (rc < 0) {
|
||||
error_setg(errp, "INQUIRY failed");
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Make a guess for the block size, we'll fix it when the guest sends.
|
||||
|
@ -2788,9 +2764,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp)
|
|||
|
||||
scsi_realize(&s->qdev, errp);
|
||||
scsi_generic_read_device_inquiry(&s->qdev);
|
||||
|
||||
out:
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
|
||||
typedef struct SCSIBlockReq {
|
||||
|
@ -2810,7 +2783,6 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
|
|||
{
|
||||
SCSIBlockReq *req = (SCSIBlockReq *)opaque;
|
||||
SCSIDiskReq *r = &req->req;
|
||||
SCSIDevice *s = r->req.dev;
|
||||
sg_io_hdr_t *io_hdr = &req->io_header;
|
||||
|
||||
if (ret == 0) {
|
||||
|
@ -2827,13 +2799,10 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
|
|||
}
|
||||
|
||||
if (ret > 0) {
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.blk));
|
||||
if (scsi_handle_rw_error(r, ret, true)) {
|
||||
aio_context_release(blk_get_aio_context(s->conf.blk));
|
||||
scsi_req_unref(&r->req);
|
||||
return;
|
||||
}
|
||||
aio_context_release(blk_get_aio_context(s->conf.blk));
|
||||
|
||||
/* Ignore error. */
|
||||
ret = 0;
|
||||
|
|
|
@ -109,15 +109,11 @@ done:
|
|||
static void scsi_command_complete(void *opaque, int ret)
|
||||
{
|
||||
SCSIGenericReq *r = (SCSIGenericReq *)opaque;
|
||||
SCSIDevice *s = r->req.dev;
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.blk));
|
||||
|
||||
assert(r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
scsi_command_complete_noio(r, ret);
|
||||
aio_context_release(blk_get_aio_context(s->conf.blk));
|
||||
}
|
||||
|
||||
static int execute_command(BlockBackend *blk,
|
||||
|
@ -274,14 +270,12 @@ static void scsi_read_complete(void * opaque, int ret)
|
|||
SCSIDevice *s = r->req.dev;
|
||||
int len;
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.blk));
|
||||
|
||||
assert(r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
if (ret || r->req.io_canceled) {
|
||||
scsi_command_complete_noio(r, ret);
|
||||
goto done;
|
||||
return;
|
||||
}
|
||||
|
||||
len = r->io_header.dxfer_len - r->io_header.resid;
|
||||
|
@ -320,7 +314,7 @@ static void scsi_read_complete(void * opaque, int ret)
|
|||
r->io_header.status != GOOD ||
|
||||
len == 0) {
|
||||
scsi_command_complete_noio(r, 0);
|
||||
goto done;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Snoop READ CAPACITY output to set the blocksize. */
|
||||
|
@ -356,9 +350,6 @@ static void scsi_read_complete(void * opaque, int ret)
|
|||
req_complete:
|
||||
scsi_req_data(&r->req, len);
|
||||
scsi_req_unref(&r->req);
|
||||
|
||||
done:
|
||||
aio_context_release(blk_get_aio_context(s->conf.blk));
|
||||
}
|
||||
|
||||
/* Read more data from scsi device into buffer. */
|
||||
|
@ -391,14 +382,12 @@ static void scsi_write_complete(void * opaque, int ret)
|
|||
|
||||
trace_scsi_generic_write_complete(ret);
|
||||
|
||||
aio_context_acquire(blk_get_aio_context(s->conf.blk));
|
||||
|
||||
assert(r->req.aiocb != NULL);
|
||||
r->req.aiocb = NULL;
|
||||
|
||||
if (ret || r->req.io_canceled) {
|
||||
scsi_command_complete_noio(r, ret);
|
||||
goto done;
|
||||
return;
|
||||
}
|
||||
|
||||
if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
|
||||
|
@ -408,9 +397,6 @@ static void scsi_write_complete(void * opaque, int ret)
|
|||
}
|
||||
|
||||
scsi_command_complete_noio(r, ret);
|
||||
|
||||
done:
|
||||
aio_context_release(blk_get_aio_context(s->conf.blk));
|
||||
}
|
||||
|
||||
/* Write data to a scsi device. Returns nonzero on failure.
|
||||
|
|
|
@@ -149,23 +149,17 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)

    memory_region_transaction_commit();

    /*
     * These fields are visible to the IOThread so we rely on implicit barriers
     * in aio_context_acquire() on the write side and aio_notify_accept() on
     * the read side.
     */
    s->dataplane_starting = false;
    s->dataplane_started = true;
    smp_wmb(); /* paired with aio_notify_accept() */

    if (s->bus.drain_count == 0) {
        aio_context_acquire(s->ctx);
        virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
        virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);

        for (i = 0; i < vs->conf.num_queues; i++) {
            virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
        }
        aio_context_release(s->ctx);
    }
    return 0;

@ -123,6 +123,30 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
|
|||
virtio_scsi_free_req(req);
|
||||
}
|
||||
|
||||
static void virtio_scsi_complete_req_bh(void *opaque)
|
||||
{
|
||||
VirtIOSCSIReq *req = opaque;
|
||||
|
||||
virtio_scsi_complete_req(req);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
|
||||
* thread cannot touch the virtqueue since that could race with an IOThread.
|
||||
*/
|
||||
static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
|
||||
{
|
||||
VirtIOSCSI *s = req->dev;
|
||||
|
||||
if (!s->ctx || s->ctx == qemu_get_aio_context()) {
|
||||
/* No need to schedule a BH when there is no IOThread */
|
||||
virtio_scsi_complete_req(req);
|
||||
} else {
|
||||
/* Run request completion in the IOThread */
|
||||
aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
|
||||
}
|
||||
}
|
||||
|
||||
static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
|
||||
{
|
||||
virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
|
||||
|
@ -338,10 +362,7 @@ static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
|
|||
|
||||
out:
|
||||
object_unref(OBJECT(d));
|
||||
|
||||
virtio_scsi_acquire(s);
|
||||
virtio_scsi_complete_req(req);
|
||||
virtio_scsi_release(s);
|
||||
virtio_scsi_complete_req_from_main_loop(req);
|
||||
}
|
||||
|
||||
/* Some TMFs must be processed from the main loop thread */
|
||||
|
@@ -354,18 +375,16 @@ static void virtio_scsi_do_tmf_bh(void *opaque)

    GLOBAL_STATE_CODE();

    virtio_scsi_acquire(s);
    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
        QTAILQ_INSERT_TAIL(&reqs, req, next);
    }

        QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
            QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
            QTAILQ_INSERT_TAIL(&reqs, req, next);

            qemu_bh_delete(s->tmf_bh);
            s->tmf_bh = NULL;
        }

    qemu_bh_delete(s->tmf_bh);
    s->tmf_bh = NULL;

    virtio_scsi_release(s);

    QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
        QTAILQ_REMOVE(&reqs, req, next);
        virtio_scsi_do_one_tmf_bh(req);
@@ -379,8 +398,7 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)

    GLOBAL_STATE_CODE();

    virtio_scsi_acquire(s);

    /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
    if (s->tmf_bh) {
        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;

@@ -393,19 +411,19 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
        req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        virtio_scsi_complete_req(req);
    }

    virtio_scsi_release(s);
}

static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;

    QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
        QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);

    if (!s->tmf_bh) {
        s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
        qemu_bh_schedule(s->tmf_bh);
        if (!s->tmf_bh) {
            s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
            qemu_bh_schedule(s->tmf_bh);
        }
    }
}
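The tmf_bh_lock critical sections above use WITH_QEMU_LOCK_GUARD(), which drops the mutex automatically however the block is left (fall-through, return, or goto). A rough self-contained sketch of the idiom; MyState and MyReq are invented for illustration:

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"

typedef struct MyReq {
    QTAILQ_ENTRY(MyReq) next;
} MyReq;

typedef struct MyState {
    QemuMutex list_lock;               /* initialized with qemu_mutex_init() */
    QTAILQ_HEAD(, MyReq) list;         /* protected by list_lock */
} MyState;

static void my_defer(MyState *s, MyReq *req)
{
    WITH_QEMU_LOCK_GUARD(&s->list_lock) {
        QTAILQ_INSERT_TAIL(&s->list, req, next);
    }
    /* list_lock has been released here, whichever way the block was exited */
}
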
@ -624,9 +642,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
|
|||
return;
|
||||
}
|
||||
|
||||
virtio_scsi_acquire(s);
|
||||
virtio_scsi_handle_ctrl_vq(s, vq);
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
|
||||
static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
|
||||
|
@ -864,9 +880,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
|
|||
return;
|
||||
}
|
||||
|
||||
virtio_scsi_acquire(s);
|
||||
virtio_scsi_handle_cmd_vq(s, vq);
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
|
||||
static void virtio_scsi_get_config(VirtIODevice *vdev,
|
||||
|
@ -1013,9 +1027,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
|
|||
return;
|
||||
}
|
||||
|
||||
virtio_scsi_acquire(s);
|
||||
virtio_scsi_handle_event_vq(s, vq);
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
|
||||
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
|
||||
|
@ -1034,9 +1046,7 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
|
|||
},
|
||||
};
|
||||
|
||||
virtio_scsi_acquire(s);
|
||||
virtio_scsi_push_event(s, &info);
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1053,17 +1063,13 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|||
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
|
||||
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
||||
SCSIDevice *sd = SCSI_DEVICE(dev);
|
||||
AioContext *old_context;
|
||||
int ret;
|
||||
|
||||
if (s->ctx && !s->dataplane_fenced) {
|
||||
if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
|
||||
return;
|
||||
}
|
||||
old_context = blk_get_aio_context(sd->conf.blk);
|
||||
aio_context_acquire(old_context);
|
||||
ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
|
||||
aio_context_release(old_context);
|
||||
if (ret < 0) {
|
||||
return;
|
||||
}
|
||||
|
@ -1079,10 +1085,8 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|||
},
|
||||
};
|
||||
|
||||
virtio_scsi_acquire(s);
|
||||
virtio_scsi_push_event(s, &info);
|
||||
scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1104,17 +1108,13 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|||
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
|
||||
|
||||
if (s->ctx) {
|
||||
virtio_scsi_acquire(s);
|
||||
/* If other users keep the BlockBackend in the iothread, that's ok */
|
||||
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
|
||||
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
|
||||
virtio_scsi_acquire(s);
|
||||
virtio_scsi_push_event(s, &info);
|
||||
scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
|
||||
virtio_scsi_release(s);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1235,6 +1235,7 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
|
|||
Error *err = NULL;
|
||||
|
||||
QTAILQ_INIT(&s->tmf_bh_list);
|
||||
qemu_mutex_init(&s->tmf_bh_lock);
|
||||
|
||||
virtio_scsi_common_realize(dev,
|
||||
virtio_scsi_handle_ctrl,
|
||||
|
@ -1277,6 +1278,7 @@ static void virtio_scsi_device_unrealize(DeviceState *dev)
|
|||
|
||||
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
|
||||
virtio_scsi_common_unrealize(dev);
|
||||
qemu_mutex_destroy(&s->tmf_bh_lock);
|
||||
}
|
||||
|
||||
static Property virtio_scsi_properties[] = {
|
||||
|
|
|
@@ -63,9 +63,6 @@ extern AioWait global_aio_wait;
 * @ctx: the aio context, or NULL if multiple aio contexts (for which the
 *       caller does not hold a lock) are involved in the polling condition.
 * @cond: wait while this conditional expression is true
 * @unlock: whether to unlock and then lock again @ctx. This applies
 *          only when waiting for another AioContext from the main loop.
 *          Otherwise it's ignored.
 *
 * Wait while a condition is true. Use this to implement synchronous
 * operations that require event loop activity.

@@ -78,7 +75,7 @@ extern AioWait global_aio_wait
 * wait on conditions between two IOThreads since that could lead to deadlock,
 * go via the main loop instead.
 */
#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({              \
#define AIO_WAIT_WHILE_INTERNAL(ctx, cond) ({                      \
    bool waited_ = false;                                          \
    AioWait *wait_ = &global_aio_wait;                             \
    AioContext *ctx_ = (ctx);                                      \

@@ -95,13 +92,7 @@ extern AioWait global_aio_wait;
        assert(qemu_get_current_aio_context() ==                   \
               qemu_get_aio_context());                            \
        while ((cond)) {                                           \
            if (unlock && ctx_) {                                  \
                aio_context_release(ctx_);                         \
            }                                                      \
            aio_poll(qemu_get_aio_context(), true);                \
            if (unlock && ctx_) {                                  \
                aio_context_acquire(ctx_);                         \
            }                                                      \
            waited_ = true;                                        \
        }                                                          \
    }                                                              \

@@ -109,10 +100,11 @@ extern AioWait global_aio_wait;
    waited_; })

#define AIO_WAIT_WHILE(ctx, cond)                                  \
    AIO_WAIT_WHILE_INTERNAL(ctx, cond, true)
    AIO_WAIT_WHILE_INTERNAL(ctx, cond)

/* TODO replace this with AIO_WAIT_WHILE() in a future patch */
#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond)                         \
    AIO_WAIT_WHILE_INTERNAL(ctx, cond, false)
    AIO_WAIT_WHILE_INTERNAL(ctx, cond)

/**
 * aio_wait_kick:
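With the @unlock parameter gone, AIO_WAIT_WHILE() and AIO_WAIT_WHILE_UNLOCKED() expand to the same code. A minimal sketch of how such a wait is typically written; the completion flag is hypothetical, not from this series:

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "block/aio-wait.h"

static bool my_io_done;    /* set to true by a completion callback elsewhere */

/* Called from the main loop (or the home AioContext of the operation). */
static void wait_for_my_io(AioContext *ctx)
{
    /* Polls until the condition turns false; nothing is released and
     * re-acquired around aio_poll() any more, since the AioContext lock
     * no longer exists. */
    AIO_WAIT_WHILE(ctx, !qatomic_read(&my_io_done));
}
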
@ -278,23 +278,6 @@ void aio_context_ref(AioContext *ctx);
|
|||
*/
|
||||
void aio_context_unref(AioContext *ctx);
|
||||
|
||||
/* Take ownership of the AioContext. If the AioContext will be shared between
|
||||
* threads, and a thread does not want to be interrupted, it will have to
|
||||
* take ownership around calls to aio_poll(). Otherwise, aio_poll()
|
||||
* automatically takes care of calling aio_context_acquire and
|
||||
* aio_context_release.
|
||||
*
|
||||
* Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A
|
||||
* thread still has to call those to avoid being interrupted by the guest.
|
||||
*
|
||||
* Bottom halves, timers and callbacks can be created or removed without
|
||||
* acquiring the AioContext.
|
||||
*/
|
||||
void aio_context_acquire(AioContext *ctx);
|
||||
|
||||
/* Relinquish ownership of the AioContext. */
|
||||
void aio_context_release(AioContext *ctx);
|
||||
|
||||
/**
|
||||
* aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
|
||||
* run only once and as soon as possible.
|
||||
|
|
|
@ -70,9 +70,6 @@
|
|||
* automatically takes the graph rdlock when calling the wrapped function. In
|
||||
* the same way, no_co_wrapper_bdrv_wrlock functions automatically take the
|
||||
* graph wrlock.
|
||||
*
|
||||
* If the first parameter of the function is a BlockDriverState, BdrvChild or
|
||||
* BlockBackend pointer, the AioContext lock for it is taken in the wrapper.
|
||||
*/
|
||||
#define no_co_wrapper
|
||||
#define no_co_wrapper_bdrv_rdlock
|
||||
|
|
|
@ -31,11 +31,10 @@
|
|||
/*
|
||||
* Global state (GS) API. These functions run under the BQL.
|
||||
*
|
||||
* If a function modifies the graph, it also uses drain and/or
|
||||
* aio_context_acquire/release to be sure it has unique access.
|
||||
* aio_context locking is needed together with BQL because of
|
||||
* the thread-safe I/O API that concurrently runs and accesses
|
||||
* the graph without the BQL.
|
||||
* If a function modifies the graph, it also uses the graph lock to be sure it
|
||||
* has unique access. The graph lock is needed together with BQL because of the
|
||||
* thread-safe I/O API that concurrently runs and accesses the graph without
|
||||
* the BQL.
|
||||
*
|
||||
* It is important to note that not all of these functions are
|
||||
* necessarily limited to running under the BQL, but they would
|
||||
|
@ -268,20 +267,6 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
|
|||
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
|
||||
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
|
||||
|
||||
/**
|
||||
* Locks the AioContext of @bs if it's not the current AioContext. This avoids
|
||||
* double locking which could lead to deadlocks: This is a coroutine_fn, so we
|
||||
* know we already own the lock of the current AioContext.
|
||||
*
|
||||
* May only be called in the main thread.
|
||||
*/
|
||||
void coroutine_fn bdrv_co_lock(BlockDriverState *bs);
|
||||
|
||||
/**
|
||||
* Unlocks the AioContext of @bs if it's not the current AioContext.
|
||||
*/
|
||||
void coroutine_fn bdrv_co_unlock(BlockDriverState *bs);
|
||||
|
||||
bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
|
||||
GHashTable *visited, Transaction *tran,
|
||||
Error **errp);
|
||||
|
|
|
@ -31,8 +31,7 @@
|
|||
|
||||
/*
|
||||
* I/O API functions. These functions are thread-safe, and therefore
|
||||
* can run in any thread as long as the thread has called
|
||||
* aio_context_acquire/release().
|
||||
* can run in any thread.
|
||||
*
|
||||
* These functions can only call functions from I/O and Common categories,
|
||||
* but can be invoked by GS, "I/O or GS" and I/O APIs.
|
||||
|
@ -333,11 +332,10 @@ bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
|
|||
* "I/O or GS" API functions. These functions can run without
|
||||
* the BQL, but only in one specific iothread/main loop.
|
||||
*
|
||||
* More specifically, these functions use BDRV_POLL_WHILE(bs), which
|
||||
* requires the caller to be either in the main thread and hold
|
||||
* the BlockdriverState (bs) AioContext lock, or directly in the
|
||||
* home thread that runs the bs AioContext. Calling them from
|
||||
* another thread in another AioContext would cause deadlocks.
|
||||
* More specifically, these functions use BDRV_POLL_WHILE(bs), which requires
|
||||
* the caller to be either in the main thread or directly in the home thread
|
||||
* that runs the bs AioContext. Calling them from another thread in another
|
||||
* AioContext would cause deadlocks.
|
||||
*
|
||||
* Therefore, these functions are not proper I/O, because they
|
||||
* can't run in *any* iothreads, but only in a specific one.
|
||||
|
|
|
@ -1192,8 +1192,6 @@ struct BlockDriverState {
|
|||
/* The error object in use for blocking operations on backing_hd */
|
||||
Error *backing_blocker;
|
||||
|
||||
/* Protected by AioContext lock */
|
||||
|
||||
/*
|
||||
* If we are reading a disk image, give its size in sectors.
|
||||
* Generally read-only; it is written to by load_snapshot and
|
||||
|
|
|
@ -110,34 +110,17 @@ void unregister_aiocontext(AioContext *ctx);
|
|||
*
|
||||
* The wrlock can only be taken from the main loop, with BQL held, as only the
|
||||
* main loop is allowed to modify the graph.
|
||||
*
|
||||
* If @bs is non-NULL, its AioContext is temporarily released.
|
||||
*
|
||||
* This function polls. Callers must not hold the lock of any AioContext other
|
||||
* than the current one and the one of @bs.
|
||||
*/
|
||||
void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
|
||||
bdrv_graph_wrlock(BlockDriverState *bs);
|
||||
bdrv_graph_wrlock(void);
|
||||
|
||||
/*
|
||||
* bdrv_graph_wrunlock:
|
||||
* Write finished, reset global has_writer to 0 and restart
|
||||
* all readers that are waiting.
|
||||
*
|
||||
* If @bs is non-NULL, its AioContext is temporarily released.
|
||||
*/
|
||||
void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
|
||||
bdrv_graph_wrunlock(BlockDriverState *bs);
|
||||
|
||||
/*
|
||||
* bdrv_graph_wrunlock_ctx:
|
||||
* Write finished, reset global has_writer to 0 and restart
|
||||
* all readers that are waiting.
|
||||
*
|
||||
* If @ctx is non-NULL, its lock is temporarily released.
|
||||
*/
|
||||
void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
|
||||
bdrv_graph_wrunlock_ctx(AioContext *ctx);
|
||||
bdrv_graph_wrunlock(void);
|
||||
|
||||
/*
|
||||
* bdrv_graph_co_rdlock:
|
||||
|
|
|
@ -86,8 +86,6 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs,
|
|||
|
||||
/*
|
||||
* Group operations. All block drivers are involved.
|
||||
* These functions will properly handle dataplane (take aio_context_acquire
|
||||
* when appropriate for appropriate block drivers
|
||||
*/
|
||||
|
||||
bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
|
||||
|
|
|
@ -24,6 +24,7 @@ extern const PropertyInfo qdev_prop_off_auto_pcibar;
|
|||
extern const PropertyInfo qdev_prop_pcie_link_speed;
|
||||
extern const PropertyInfo qdev_prop_pcie_link_width;
|
||||
extern const PropertyInfo qdev_prop_cpus390entitlement;
|
||||
extern const PropertyInfo qdev_prop_iothread_vq_mapping_list;
|
||||
|
||||
#define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \
|
||||
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t)
|
||||
|
@ -82,4 +83,8 @@ extern const PropertyInfo qdev_prop_cpus390entitlement;
|
|||
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \
|
||||
CpuS390Entitlement)
|
||||
|
||||
#define DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST(_name, _state, _field) \
|
||||
DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \
|
||||
IOThreadVirtQueueMappingList *)
|
||||
|
||||
#endif
|
||||
|
|
|
@ -230,8 +230,8 @@ void qdev_property_add_static(DeviceState *dev, Property *prop);
|
|||
* @target: Device which has properties to be aliased
|
||||
* @source: Object to add alias properties to
|
||||
*
|
||||
* Add alias properties to the @source object for all qdev properties on
|
||||
* the @target DeviceState.
|
||||
* Add alias properties to the @source object for all properties on the @target
|
||||
* DeviceState.
|
||||
*
|
||||
* This is useful when @target is an internal implementation object
|
||||
* owned by @source, and you want to expose all the properties of that
|
||||
|
|
|
@ -69,14 +69,19 @@ struct SCSIDevice
|
|||
{
|
||||
DeviceState qdev;
|
||||
VMChangeStateEntry *vmsentry;
|
||||
QEMUBH *bh;
|
||||
uint32_t id;
|
||||
BlockConf conf;
|
||||
SCSISense unit_attention;
|
||||
bool sense_is_ua;
|
||||
uint8_t sense[SCSI_SENSE_BUF_SIZE];
|
||||
uint32_t sense_len;
|
||||
|
||||
/*
|
||||
* The requests list is only accessed from the AioContext that executes
|
||||
* requests or from the main loop when IOThread processing is stopped.
|
||||
*/
|
||||
QTAILQ_HEAD(, SCSIRequest) requests;
|
||||
|
||||
uint32_t channel;
|
||||
uint32_t lun;
|
||||
int blocksize;
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include "sysemu/block-backend.h"
|
||||
#include "sysemu/block-ram-registrar.h"
|
||||
#include "qom/object.h"
|
||||
#include "qapi/qapi-types-virtio.h"
|
||||
|
||||
#define TYPE_VIRTIO_BLK "virtio-blk-device"
|
||||
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK)
|
||||
|
@ -37,6 +38,7 @@ struct VirtIOBlkConf
|
|||
{
|
||||
BlockConf conf;
|
||||
IOThread *iothread;
|
||||
IOThreadVirtQueueMappingList *iothread_vq_mapping_list;
|
||||
char *serial;
|
||||
uint32_t request_merging;
|
||||
uint16_t num_queues;
|
||||
|
@ -54,7 +56,8 @@ struct VirtIOBlockReq;
|
|||
struct VirtIOBlock {
|
||||
VirtIODevice parent_obj;
|
||||
BlockBackend *blk;
|
||||
void *rq;
|
||||
QemuMutex rq_lock;
|
||||
void *rq; /* protected by rq_lock */
|
||||
VirtIOBlkConf conf;
|
||||
unsigned short sector_mask;
|
||||
bool original_wce;
|
||||
|
|
|
@@ -85,8 +85,9 @@ struct VirtIOSCSI {

    /*
     * TMFs deferred to main loop BH. These fields are protected by
     * virtio_scsi_acquire().
     * tmf_bh_lock.
     */
    QemuMutex tmf_bh_lock;
    QEMUBH *tmf_bh;
    QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list;

@@ -100,20 +101,6 @@ struct VirtIOSCSI {
    uint32_t host_features;
};

static inline void virtio_scsi_acquire(VirtIOSCSI *s)
{
    if (s->ctx) {
        aio_context_acquire(s->ctx);
    }
}

static inline void virtio_scsi_release(VirtIOSCSI *s)
{
    if (s->ctx) {
        aio_context_release(s->ctx);
    }
}

void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
@ -26,9 +26,9 @@ typedef struct StringOutputVisitor StringOutputVisitor;
|
|||
* If everything else succeeds, pass @result to visit_complete() to
|
||||
* collect the result of the visit.
|
||||
*
|
||||
* The string output visitor does not implement support for visiting
|
||||
* QAPI structs, alternates, null, or arbitrary QTypes. It also
|
||||
* requires a non-null list argument to visit_start_list().
|
||||
* The string output visitor does not implement support for alternates, null,
|
||||
* or arbitrary QTypes. Struct fields are not shown. It also requires a
|
||||
* non-null list argument to visit_start_list().
|
||||
*/
|
||||
Visitor *string_output_visitor_new(bool human, char **result);
|
||||
|
||||
|
|
|
@ -67,8 +67,6 @@ typedef struct Job {
|
|||
|
||||
/**
|
||||
* The completion function that will be called when the job completes.
|
||||
* Called with AioContext lock held, since many callback implementations
|
||||
* use bdrv_* functions that require to hold the lock.
|
||||
*/
|
||||
BlockCompletionFunc *cb;
|
||||
|
||||
|
@ -264,9 +262,6 @@ struct JobDriver {
|
|||
*
|
||||
* This callback will not be invoked if the job has already failed.
|
||||
* If it fails, abort and then clean will be called.
|
||||
*
|
||||
* Called with AioContext lock held, since many callbacs implementations
|
||||
* use bdrv_* functions that require to hold the lock.
|
||||
*/
|
||||
int (*prepare)(Job *job);
|
||||
|
||||
|
@ -277,9 +272,6 @@ struct JobDriver {
|
|||
*
|
||||
* All jobs will complete with a call to either .commit() or .abort() but
|
||||
* never both.
|
||||
*
|
||||
* Called with AioContext lock held, since many callback implementations
|
||||
* use bdrv_* functions that require to hold the lock.
|
||||
*/
|
||||
void (*commit)(Job *job);
|
||||
|
||||
|
@ -290,9 +282,6 @@ struct JobDriver {
|
|||
*
|
||||
* All jobs will complete with a call to either .commit() or .abort() but
|
||||
* never both.
|
||||
*
|
||||
* Called with AioContext lock held, since many callback implementations
|
||||
* use bdrv_* functions that require to hold the lock.
|
||||
*/
|
||||
void (*abort)(Job *job);
|
||||
|
||||
|
@ -301,9 +290,6 @@ struct JobDriver {
|
|||
* .commit() or .abort(). Regardless of which callback is invoked after
|
||||
* completion, .clean() will always be called, even if the job does not
|
||||
* belong to a transaction group.
|
||||
*
|
||||
* Called with AioContext lock held, since many callbacs implementations
|
||||
* use bdrv_* functions that require to hold the lock.
|
||||
*/
|
||||
void (*clean)(Job *job);
|
||||
|
||||
|
@ -318,17 +304,12 @@ struct JobDriver {
|
|||
* READY).
|
||||
* (If the callback is NULL, the job is assumed to terminate
|
||||
* without I/O.)
|
||||
*
|
||||
* Called with AioContext lock held, since many callback implementations
|
||||
* use bdrv_* functions that require to hold the lock.
|
||||
*/
|
||||
bool (*cancel)(Job *job, bool force);
|
||||
|
||||
|
||||
/**
|
||||
* Called when the job is freed.
|
||||
* Called with AioContext lock held, since many callback implementations
|
||||
* use bdrv_* functions that require to hold the lock.
|
||||
*/
|
||||
void (*free)(Job *job);
|
||||
};
|
||||
|
@ -424,7 +405,6 @@ void job_ref_locked(Job *job);
|
|||
* Release a reference that was previously acquired with job_ref_locked() or
|
||||
* job_create(). If it's the last reference to the object, it will be freed.
|
||||
*
|
||||
* Takes AioContext lock internally to invoke a job->driver callback.
|
||||
* Called with job lock held.
|
||||
*/
|
||||
void job_unref_locked(Job *job);
|
||||
|
|
 job.c | 16
|
@ -464,12 +464,8 @@ void job_unref_locked(Job *job)
|
|||
assert(!job->txn);
|
||||
|
||||
if (job->driver->free) {
|
||||
AioContext *aio_context = job->aio_context;
|
||||
job_unlock();
|
||||
/* FIXME: aiocontext lock is required because cb calls blk_unref */
|
||||
aio_context_acquire(aio_context);
|
||||
job->driver->free(job);
|
||||
aio_context_release(aio_context);
|
||||
job_lock();
|
||||
}
|
||||
|
||||
|
@ -840,12 +836,10 @@ static void job_clean(Job *job)
|
|||
|
||||
/*
|
||||
* Called with job_mutex held, but releases it temporarily.
|
||||
* Takes AioContext lock internally to invoke a job->driver callback.
|
||||
*/
|
||||
static int job_finalize_single_locked(Job *job)
|
||||
{
|
||||
int job_ret;
|
||||
AioContext *ctx = job->aio_context;
|
||||
|
||||
assert(job_is_completed_locked(job));
|
||||
|
||||
|
@ -854,7 +848,6 @@ static int job_finalize_single_locked(Job *job)
|
|||
|
||||
job_ret = job->ret;
|
||||
job_unlock();
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
if (!job_ret) {
|
||||
job_commit(job);
|
||||
|
@ -867,7 +860,6 @@ static int job_finalize_single_locked(Job *job)
|
|||
job->cb(job->opaque, job_ret);
|
||||
}
|
||||
|
||||
aio_context_release(ctx);
|
||||
job_lock();
|
||||
|
||||
/* Emit events only if we actually started */
|
||||
|
@ -886,17 +878,13 @@ static int job_finalize_single_locked(Job *job)
|
|||
|
||||
/*
|
||||
* Called with job_mutex held, but releases it temporarily.
|
||||
* Takes AioContext lock internally to invoke a job->driver callback.
|
||||
*/
|
||||
static void job_cancel_async_locked(Job *job, bool force)
|
||||
{
|
||||
AioContext *ctx = job->aio_context;
|
||||
GLOBAL_STATE_CODE();
|
||||
if (job->driver->cancel) {
|
||||
job_unlock();
|
||||
aio_context_acquire(ctx);
|
||||
force = job->driver->cancel(job, force);
|
||||
aio_context_release(ctx);
|
||||
job_lock();
|
||||
} else {
|
||||
/* No .cancel() means the job will behave as if force-cancelled */
|
||||
|
@ -931,7 +919,6 @@ static void job_cancel_async_locked(Job *job, bool force)
|
|||
|
||||
/*
|
||||
* Called with job_mutex held, but releases it temporarily.
|
||||
* Takes AioContext lock internally to invoke a job->driver callback.
|
||||
*/
|
||||
static void job_completed_txn_abort_locked(Job *job)
|
||||
{
|
||||
|
@ -979,15 +966,12 @@ static void job_completed_txn_abort_locked(Job *job)
|
|||
static int job_prepare_locked(Job *job)
|
||||
{
|
||||
int ret;
|
||||
AioContext *ctx = job->aio_context;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
if (job->ret == 0 && job->driver->prepare) {
|
||||
job_unlock();
|
||||
aio_context_acquire(ctx);
|
||||
ret = job->driver->prepare(job);
|
||||
aio_context_release(ctx);
|
||||
job_lock();
|
||||
job->ret = ret;
|
||||
job_update_rc_locked(job);
|
||||
|
|
|
@ -66,7 +66,7 @@ typedef struct BlkMigDevState {
|
|||
/* Protected by block migration lock. */
|
||||
int64_t completed_sectors;
|
||||
|
||||
/* During migration this is protected by iothread lock / AioContext.
|
||||
/* During migration this is protected by bdrv_dirty_bitmap_lock().
|
||||
* Allocation and free happen during setup and cleanup respectively.
|
||||
*/
|
||||
BdrvDirtyBitmap *dirty_bitmap;
|
||||
|
@ -101,7 +101,7 @@ typedef struct BlkMigState {
|
|||
int prev_progress;
|
||||
int bulk_completed;
|
||||
|
||||
/* Lock must be taken _inside_ the iothread lock and any AioContexts. */
|
||||
/* Lock must be taken _inside_ the iothread lock. */
|
||||
QemuMutex lock;
|
||||
} BlkMigState;
|
||||
|
||||
|
@ -270,7 +270,6 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
|
|||
|
||||
if (bmds->shared_base) {
|
||||
qemu_mutex_lock_iothread();
|
||||
aio_context_acquire(blk_get_aio_context(bb));
|
||||
/* Skip unallocated sectors; intentionally treats failure or
|
||||
* partial sector as an allocated sector */
|
||||
while (cur_sector < total_sectors &&
|
||||
|
@ -281,7 +280,6 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
|
|||
}
|
||||
cur_sector += count >> BDRV_SECTOR_BITS;
|
||||
}
|
||||
aio_context_release(blk_get_aio_context(bb));
|
||||
qemu_mutex_unlock_iothread();
|
||||
}
|
||||
|
||||
|
@ -313,21 +311,16 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
|
|||
block_mig_state.submitted++;
|
||||
blk_mig_unlock();
|
||||
|
||||
/* We do not know if bs is under the main thread (and thus does
|
||||
* not acquire the AioContext when doing AIO) or rather under
|
||||
* dataplane. Thus acquire both the iothread mutex and the
|
||||
* AioContext.
|
||||
*
|
||||
* This is ugly and will disappear when we make bdrv_* thread-safe,
|
||||
* without the need to acquire the AioContext.
|
||||
/*
|
||||
* The migration thread does not have an AioContext. Lock the BQL so that
|
||||
* I/O runs in the main loop AioContext (see
|
||||
* qemu_get_current_aio_context()).
|
||||
*/
|
||||
qemu_mutex_lock_iothread();
|
||||
aio_context_acquire(blk_get_aio_context(bmds->blk));
|
||||
bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
|
||||
nr_sectors * BDRV_SECTOR_SIZE);
|
||||
blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
|
||||
0, blk_mig_read_cb, blk);
|
||||
aio_context_release(blk_get_aio_context(bmds->blk));
|
||||
qemu_mutex_unlock_iothread();
|
||||
|
||||
bmds->cur_sector = cur_sector + nr_sectors;
|
||||
|
@ -512,7 +505,7 @@ static void blk_mig_reset_dirty_cursor(void)
|
|||
}
|
||||
}
|
||||
|
||||
/* Called with iothread lock and AioContext taken. */
|
||||
/* Called with iothread lock taken. */
|
||||
|
||||
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
|
||||
int is_async)
|
||||
|
@ -606,9 +599,7 @@ static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
|
|||
int ret = 1;
|
||||
|
||||
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
|
||||
aio_context_acquire(blk_get_aio_context(bmds->blk));
|
||||
ret = mig_save_device_dirty(f, bmds, is_async);
|
||||
aio_context_release(blk_get_aio_context(bmds->blk));
|
||||
if (ret <= 0) {
|
||||
break;
|
||||
}
|
||||
|
@ -666,9 +657,9 @@ static int64_t get_remaining_dirty(void)
|
|||
int64_t dirty = 0;
|
||||
|
||||
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
|
||||
aio_context_acquire(blk_get_aio_context(bmds->blk));
|
||||
bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
|
||||
dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
|
||||
aio_context_release(blk_get_aio_context(bmds->blk));
|
||||
bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
|
||||
}
|
||||
|
||||
return dirty;
|
||||
|
@ -681,7 +672,6 @@ static void block_migration_cleanup_bmds(void)
|
|||
{
|
||||
BlkMigDevState *bmds;
|
||||
BlockDriverState *bs;
|
||||
AioContext *ctx;
|
||||
|
||||
unset_dirty_tracking();
|
||||
|
||||
|
@ -693,13 +683,7 @@ static void block_migration_cleanup_bmds(void)
|
|||
bdrv_op_unblock_all(bs, bmds->blocker);
|
||||
}
|
||||
error_free(bmds->blocker);
|
||||
|
||||
/* Save ctx, because bmds->blk can disappear during blk_unref. */
|
||||
ctx = blk_get_aio_context(bmds->blk);
|
||||
aio_context_acquire(ctx);
|
||||
blk_unref(bmds->blk);
|
||||
aio_context_release(ctx);
|
||||
|
||||
g_free(bmds->blk_name);
|
||||
g_free(bmds->aio_bitmap);
|
||||
g_free(bmds);
|
||||
|
|
|
@ -852,14 +852,11 @@ static void vm_completion(ReadLineState *rs, const char *str)
|
|||
|
||||
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
|
||||
SnapshotInfoList *snapshots, *snapshot;
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
bool ok = false;
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
if (bdrv_can_snapshot(bs)) {
|
||||
ok = bdrv_query_snapshot_info_list(bs, &snapshots, NULL) == 0;
|
||||
}
|
||||
aio_context_release(ctx);
|
||||
if (!ok) {
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -3049,7 +3049,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
|
|||
int saved_vm_running;
|
||||
uint64_t vm_state_size;
|
||||
g_autoptr(GDateTime) now = g_date_time_new_now_local();
|
||||
AioContext *aio_context;
|
||||
|
||||
GLOBAL_STATE_CODE();
|
||||
|
||||
|
@ -3092,7 +3091,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
|
|||
if (bs == NULL) {
|
||||
return false;
|
||||
}
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
|
||||
saved_vm_running = runstate_is_running();
|
||||
|
||||
|
@ -3101,8 +3099,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
|
|||
|
||||
bdrv_drain_all_begin();
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
|
||||
memset(sn, 0, sizeof(*sn));
|
||||
|
||||
/* fill auxiliary fields */
|
||||
|
@ -3139,14 +3135,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
|
|||
goto the_end;
|
||||
}
|
||||
|
||||
/* The bdrv_all_create_snapshot() call that follows acquires the AioContext
|
||||
* for itself. BDRV_POLL_WHILE() does not support nested locking because
|
||||
* it only releases the lock once. Therefore synchronous I/O will deadlock
|
||||
* unless we release the AioContext before bdrv_all_create_snapshot().
|
||||
*/
|
||||
aio_context_release(aio_context);
|
||||
aio_context = NULL;
|
||||
|
||||
ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
|
||||
has_devices, devices, errp);
|
||||
if (ret < 0) {
|
||||
|
@ -3157,10 +3145,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
|
|||
ret = 0;
|
||||
|
||||
the_end:
|
||||
if (aio_context) {
|
||||
aio_context_release(aio_context);
|
||||
}
|
||||
|
||||
bdrv_drain_all_end();
|
||||
|
||||
if (saved_vm_running) {
|
||||
|
@ -3258,7 +3242,6 @@ bool load_snapshot(const char *name, const char *vmstate,
|
|||
QEMUSnapshotInfo sn;
|
||||
QEMUFile *f;
|
||||
int ret;
|
||||
AioContext *aio_context;
|
||||
MigrationIncomingState *mis = migration_incoming_get_current();
|
||||
|
||||
if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
|
||||
|
@ -3278,12 +3261,9 @@ bool load_snapshot(const char *name, const char *vmstate,
|
|||
if (!bs_vm_state) {
|
||||
return false;
|
||||
}
|
||||
aio_context = bdrv_get_aio_context(bs_vm_state);
|
||||
|
||||
/* Don't even try to load empty VM states */
|
||||
aio_context_acquire(aio_context);
|
||||
ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
|
||||
aio_context_release(aio_context);
|
||||
if (ret < 0) {
|
||||
return false;
|
||||
} else if (sn.vm_state_size == 0) {
|
||||
|
@ -3320,10 +3300,8 @@ bool load_snapshot(const char *name, const char *vmstate,
|
|||
ret = -EINVAL;
|
||||
goto err_drain;
|
||||
}
|
||||
aio_context_acquire(aio_context);
|
||||
ret = qemu_loadvm_state(f);
|
||||
migration_incoming_state_destroy();
|
||||
aio_context_release(aio_context);
|
||||
|
||||
bdrv_drain_all_end();
|
||||
|
||||
|
|
 nbd/server.c | 210
|
@@ -122,26 +122,28 @@ struct NBDMetaContexts {
};

struct NBDClient {
    int refcount;
    int refcount; /* atomic */
    void (*close_fn)(NBDClient *client, bool negotiated);

    QemuMutex lock;

    NBDExport *exp;
    QCryptoTLSCreds *tlscreds;
    char *tlsauthz;
    QIOChannelSocket *sioc; /* The underlying data channel */
    QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */

    Coroutine *recv_coroutine;
    Coroutine *recv_coroutine; /* protected by lock */

    CoMutex send_lock;
    Coroutine *send_coroutine;

    bool read_yielding;
    bool quiescing;
    bool read_yielding; /* protected by lock */
    bool quiescing; /* protected by lock */

    QTAILQ_ENTRY(NBDClient) next;
    int nb_requests;
    bool closing;
    int nb_requests; /* protected by lock */
    bool closing; /* protected by lock */

    uint32_t check_align; /* If non-zero, check for aligned client requests */
@@ -1415,11 +1417,18 @@ nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp)

        len = qio_channel_readv(client->ioc, &iov, 1, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            client->read_yielding = true;
            WITH_QEMU_LOCK_GUARD(&client->lock) {
                client->read_yielding = true;

                /* Prompt main loop thread to re-run nbd_drained_poll() */
                aio_wait_kick();
            }
            qio_channel_yield(client->ioc, G_IO_IN);
            client->read_yielding = false;
            if (client->quiescing) {
                return -EAGAIN;
            WITH_QEMU_LOCK_GUARD(&client->lock) {
                client->read_yielding = false;
                if (client->quiescing) {
                    return -EAGAIN;
                }
            }
            continue;
        } else if (len < 0) {
@@ -1501,14 +1510,17 @@ static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *reque

#define MAX_NBD_REQUESTS 16

/* Runs in export AioContext and main loop thread */
void nbd_client_get(NBDClient *client)
{
    client->refcount++;
    qatomic_inc(&client->refcount);
}

void nbd_client_put(NBDClient *client)
{
    if (--client->refcount == 0) {
    assert(qemu_in_main_thread());

    if (qatomic_fetch_dec(&client->refcount) == 1) {
        /* The last reference should be dropped by client->close,
         * which is called by client_close.
         */

@@ -1525,17 +1537,47 @@ void nbd_client_put(NBDClient *client)
            blk_exp_unref(&client->exp->common);
        }
        g_free(client->contexts.bitmaps);
        qemu_mutex_destroy(&client->lock);
        g_free(client);
    }
}

/*
 * Tries to release the reference to @client, but only if other references
 * remain. This is an optimization for the common case where we want to avoid
 * the expense of scheduling nbd_client_put() in the main loop thread.
 *
 * Returns true upon success or false if the reference was not released because
 * it is the last reference.
 */
static bool nbd_client_put_nonzero(NBDClient *client)
{
    int old = qatomic_read(&client->refcount);
    int expected;

    do {
        if (old == 1) {
            return false;
        }

        expected = old;
        old = qatomic_cmpxchg(&client->refcount, expected, expected - 1);
    } while (old != expected);

    return true;
}
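nbd_client_put_nonzero() above is an instance of a general lock-free pattern: drop a reference only if it is not the last one, so the expensive slow path (here, rescheduling into the main loop thread) is taken only when the object really has to be destroyed. A generic sketch of the same compare-and-swap loop, with a stand-in counter rather than the NBDClient field:

#include "qemu/osdep.h"
#include "qemu/atomic.h"

/* Returns true if the reference was dropped, false if it was the last one. */
static bool put_unless_last(int *my_refcount)
{
    int old = qatomic_read(my_refcount);
    int expected;

    do {
        if (old == 1) {
            return false;               /* caller must take the slow path */
        }
        expected = old;
        old = qatomic_cmpxchg(my_refcount, expected, expected - 1);
    } while (old != expected);          /* retry if another thread raced us */

    return true;
}
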
static void client_close(NBDClient *client, bool negotiated)
|
||||
{
|
||||
if (client->closing) {
|
||||
return;
|
||||
}
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
client->closing = true;
|
||||
WITH_QEMU_LOCK_GUARD(&client->lock) {
|
||||
if (client->closing) {
|
||||
return;
|
||||
}
|
||||
|
||||
client->closing = true;
|
||||
}
|
||||
|
||||
/* Force requests to finish. They will drop their own references,
|
||||
* then we'll close the socket and free the NBDClient.
|
||||
|
@ -1549,6 +1591,7 @@ static void client_close(NBDClient *client, bool negotiated)
|
|||
}
|
||||
}
|
||||
|
||||
/* Runs in export AioContext with client->lock held */
|
||||
static NBDRequestData *nbd_request_get(NBDClient *client)
|
||||
{
|
||||
NBDRequestData *req;
|
||||
|
@ -1557,11 +1600,11 @@ static NBDRequestData *nbd_request_get(NBDClient *client)
|
|||
client->nb_requests++;
|
||||
|
||||
req = g_new0(NBDRequestData, 1);
|
||||
nbd_client_get(client);
|
||||
req->client = client;
|
||||
return req;
|
||||
}
|
||||
|
||||
/* Runs in export AioContext with client->lock held */
|
||||
static void nbd_request_put(NBDRequestData *req)
|
||||
{
|
||||
NBDClient *client = req->client;
|
||||
|
@ -1578,8 +1621,6 @@ static void nbd_request_put(NBDRequestData *req)
|
|||
}
|
||||
|
||||
nbd_client_receive_next_request(client);
|
||||
|
||||
nbd_client_put(client);
|
||||
}
|
||||
|
||||
static void blk_aio_attached(AioContext *ctx, void *opaque)
|
||||
|
@ -1587,14 +1628,18 @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
|
|||
NBDExport *exp = opaque;
|
||||
NBDClient *client;
|
||||
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
trace_nbd_blk_aio_attached(exp->name, ctx);
|
||||
|
||||
exp->common.ctx = ctx;
|
||||
|
||||
QTAILQ_FOREACH(client, &exp->clients, next) {
|
||||
assert(client->nb_requests == 0);
|
||||
assert(client->recv_coroutine == NULL);
|
||||
assert(client->send_coroutine == NULL);
|
||||
WITH_QEMU_LOCK_GUARD(&client->lock) {
|
||||
assert(client->nb_requests == 0);
|
||||
assert(client->recv_coroutine == NULL);
|
||||
assert(client->send_coroutine == NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1602,6 +1647,8 @@ static void blk_aio_detach(void *opaque)
|
|||
{
|
||||
NBDExport *exp = opaque;
|
||||
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);
|
||||
|
||||
exp->common.ctx = NULL;
|
||||
|
@ -1612,8 +1659,12 @@ static void nbd_drained_begin(void *opaque)
|
|||
NBDExport *exp = opaque;
|
||||
NBDClient *client;
|
||||
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
QTAILQ_FOREACH(client, &exp->clients, next) {
|
||||
client->quiescing = true;
|
||||
WITH_QEMU_LOCK_GUARD(&client->lock) {
|
||||
client->quiescing = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1622,28 +1673,48 @@ static void nbd_drained_end(void *opaque)
|
|||
NBDExport *exp = opaque;
|
||||
NBDClient *client;
|
||||
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
QTAILQ_FOREACH(client, &exp->clients, next) {
|
||||
client->quiescing = false;
|
||||
nbd_client_receive_next_request(client);
|
||||
WITH_QEMU_LOCK_GUARD(&client->lock) {
|
||||
client->quiescing = false;
|
||||
nbd_client_receive_next_request(client);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Runs in export AioContext */
|
||||
static void nbd_wake_read_bh(void *opaque)
|
||||
{
|
||||
NBDClient *client = opaque;
|
||||
qio_channel_wake_read(client->ioc);
|
||||
}
|
||||
|
||||
static bool nbd_drained_poll(void *opaque)
|
||||
{
|
||||
NBDExport *exp = opaque;
|
||||
NBDClient *client;
|
||||
|
||||
QTAILQ_FOREACH(client, &exp->clients, next) {
|
||||
if (client->nb_requests != 0) {
|
||||
/*
|
||||
* If there's a coroutine waiting for a request on nbd_read_eof()
|
||||
* enter it here so we don't depend on the client to wake it up.
|
||||
*/
|
||||
if (client->recv_coroutine != NULL && client->read_yielding) {
|
||||
qio_channel_wake_read(client->ioc);
|
||||
}
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
return true;
|
||||
QTAILQ_FOREACH(client, &exp->clients, next) {
|
||||
WITH_QEMU_LOCK_GUARD(&client->lock) {
|
||||
if (client->nb_requests != 0) {
|
||||
/*
|
||||
* If there's a coroutine waiting for a request on nbd_read_eof()
|
||||
* enter it here so we don't depend on the client to wake it up.
|
||||
*
|
||||
* Schedule a BH in the export AioContext to avoid missing the
|
||||
* wake up due to the race between qio_channel_wake_read() and
|
||||
* qio_channel_yield().
|
||||
*/
|
||||
if (client->recv_coroutine != NULL && client->read_yielding) {
|
||||
aio_bh_schedule_oneshot(nbd_export_aio_context(client->exp),
|
||||
nbd_wake_read_bh, client);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1654,6 +1725,8 @@ static void nbd_eject_notifier(Notifier *n, void *data)
|
|||
{
|
||||
NBDExport *exp = container_of(n, NBDExport, eject_notifier);
|
||||
|
||||
assert(qemu_in_main_thread());
|
||||
|
||||
blk_exp_request_shutdown(&exp->common);
|
||||
}
|
||||
|
||||
|
@ -2539,7 +2612,6 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req,
|
|||
int ret;
|
||||
|
||||
g_assert(qemu_in_coroutine());
|
||||
assert(client->recv_coroutine == qemu_coroutine_self());
|
||||
ret = nbd_receive_request(client, request, errp);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
|
@ -2936,15 +3008,23 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
|
|||
static coroutine_fn void nbd_trip(void *opaque)
|
||||
{
|
||||
NBDClient *client = opaque;
|
||||
NBDRequestData *req;
|
||||
NBDRequestData *req = NULL;
|
||||
NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */
|
||||
int ret;
|
||||
Error *local_err = NULL;
|
||||
|
||||
/*
|
||||
* Note that nbd_client_put() and client_close() must be called from the
|
||||
* main loop thread. Use aio_co_reschedule_self() to switch AioContext
|
||||
* before calling these functions.
|
||||
*/
|
||||
|
||||
trace_nbd_trip();
|
||||
|
||||
qemu_mutex_lock(&client->lock);
|
||||
|
||||
if (client->closing) {
|
||||
nbd_client_put(client);
|
||||
return;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (client->quiescing) {
|
||||
|
@ -2952,14 +3032,27 @@ static coroutine_fn void nbd_trip(void *opaque)
|
|||
* We're switching between AIO contexts. Don't attempt to receive a new
|
||||
* request and kick the main context which may be waiting for us.
|
||||
*/
|
||||
nbd_client_put(client);
|
||||
client->recv_coroutine = NULL;
|
||||
aio_wait_kick();
|
||||
return;
|
||||
goto done;
|
||||
}
|
||||
|
||||
req = nbd_request_get(client);
|
||||
ret = nbd_co_receive_request(req, &request, &local_err);
|
||||
|
||||
/*
|
||||
* nbd_co_receive_request() returns -EAGAIN when nbd_drained_begin() has
|
||||
* set client->quiescing but by the time we get back nbd_drained_end() may
|
||||
* have already cleared client->quiescing. In that case we try again
|
||||
* because nothing else will spawn an nbd_trip() coroutine until we set
|
||||
* client->recv_coroutine = NULL further down.
|
||||
*/
|
||||
do {
|
||||
assert(client->recv_coroutine == qemu_coroutine_self());
|
||||
qemu_mutex_unlock(&client->lock);
|
||||
ret = nbd_co_receive_request(req, &request, &local_err);
|
||||
qemu_mutex_lock(&client->lock);
|
||||
} while (ret == -EAGAIN && !client->quiescing);
|
||||
|
||||
client->recv_coroutine = NULL;
|
||||
|
||||
if (client->closing) {
|
||||
|
@ -2971,15 +3064,16 @@ static coroutine_fn void nbd_trip(void *opaque)
|
|||
}
|
||||
|
||||
if (ret == -EAGAIN) {
|
||||
assert(client->quiescing);
|
||||
goto done;
|
||||
}
|
||||
|
||||
nbd_client_receive_next_request(client);
|
||||
|
||||
if (ret == -EIO) {
|
||||
goto disconnect;
|
||||
}
|
||||
|
||||
qemu_mutex_unlock(&client->lock);
|
||||
qio_channel_set_cork(client->ioc, true);
|
||||
|
||||
if (ret < 0) {
|
||||
|
@ -2999,6 +3093,10 @@ static coroutine_fn void nbd_trip(void *opaque)
|
|||
g_free(request.contexts->bitmaps);
|
||||
g_free(request.contexts);
|
||||
}
|
||||
|
||||
qio_channel_set_cork(client->ioc, false);
|
||||
qemu_mutex_lock(&client->lock);
|
||||
|
||||
if (ret < 0) {
|
||||
error_prepend(&local_err, "Failed to send reply: ");
|
||||
goto disconnect;
|
||||
|
@ -3013,21 +3111,36 @@ static coroutine_fn void nbd_trip(void *opaque)
|
|||
goto disconnect;
|
||||
}
|
||||
|
||||
qio_channel_set_cork(client->ioc, false);
|
||||
done:
|
||||
nbd_request_put(req);
|
||||
nbd_client_put(client);
|
||||
if (req) {
|
||||
nbd_request_put(req);
|
||||
}
|
||||
|
||||
qemu_mutex_unlock(&client->lock);
|
||||
|
||||
if (!nbd_client_put_nonzero(client)) {
|
||||
aio_co_reschedule_self(qemu_get_aio_context());
|
||||
nbd_client_put(client);
|
||||
}
|
||||
return;
|
||||
|
||||
disconnect:
|
||||
if (local_err) {
|
||||
error_reportf_err(local_err, "Disconnect client, due to: ");
|
||||
}
|
||||
|
||||
nbd_request_put(req);
|
||||
qemu_mutex_unlock(&client->lock);
|
||||
|
||||
aio_co_reschedule_self(qemu_get_aio_context());
|
||||
client_close(client, true);
|
||||
nbd_client_put(client);
|
||||
}
|
||||
|
||||
/*
|
||||
* Runs in export AioContext and main loop thread. Caller must hold
|
||||
* client->lock.
|
||||
*/
|
||||
static void nbd_client_receive_next_request(NBDClient *client)
|
||||
{
|
||||
if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS &&
|
||||
|
@ -3053,7 +3166,9 @@ static coroutine_fn void nbd_co_client_start(void *opaque)
|
|||
return;
|
||||
}
|
||||
|
||||
nbd_client_receive_next_request(client);
|
||||
WITH_QEMU_LOCK_GUARD(&client->lock) {
|
||||
nbd_client_receive_next_request(client);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3070,6 +3185,7 @@ void nbd_client_new(QIOChannelSocket *sioc,
|
|||
Coroutine *co;
|
||||
|
||||
client = g_new0(NBDClient, 1);
|
||||
qemu_mutex_init(&client->lock);
|
||||
client->refcount = 1;
|
||||
client->tlscreds = tlscreds;
|
||||
if (tlscreds) {
|
||||
|
|
|
@ -1439,12 +1439,10 @@ static void colo_compare_finalize(Object *obj)
|
|||
qemu_bh_delete(s->event_bh);
|
||||
|
||||
AioContext *ctx = iothread_get_aio_context(s->iothread);
|
||||
aio_context_acquire(ctx);
|
||||
AIO_WAIT_WHILE(ctx, !s->out_sendco.done);
|
||||
if (s->notify_dev) {
|
||||
AIO_WAIT_WHILE(ctx, !s->notify_sendco.done);
|
||||
}
|
||||
aio_context_release(ctx);
|
||||
|
||||
/* Release all unhandled packets after compare thead exited */
|
||||
g_queue_foreach(&s->conn_list, colo_flush_packets, s);
|
||||
|
|
|
@@ -292,6 +292,20 @@ static bool print_type_null(Visitor *v, const char *name, QNull **obj,
    return true;
}

static bool start_struct(Visitor *v, const char *name, void **obj,
                         size_t size, Error **errp)
{
    return true;
}

static void end_struct(Visitor *v, void **obj)
{
    StringOutputVisitor *sov = to_sov(v);

    /* TODO actually print struct fields */
    string_output_set(sov, g_strdup("<omitted>"));
}

static bool
start_list(Visitor *v, const char *name, GenericList **list, size_t size,
           Error **errp)

@@ -379,6 +393,8 @@ Visitor *string_output_visitor_new(bool human, char **result)
    v->visitor.type_str = print_type_str;
    v->visitor.type_number = print_type_number;
    v->visitor.type_null = print_type_null;
    v->visitor.start_struct = start_struct;
    v->visitor.end_struct = end_struct;
    v->visitor.start_list = start_list;
    v->visitor.next_list = next_list;
    v->visitor.end_list = end_list;
@@ -928,3 +928,32 @@
  'data': { 'path': 'str', 'queue': 'uint16', '*index': 'uint16' },
  'returns': 'VirtioQueueElement',
  'features': [ 'unstable' ] }

##
# @IOThreadVirtQueueMapping:
#
# Describes the subset of virtqueues assigned to an IOThread.
#
# @iothread: the id of IOThread object
#
# @vqs: an optional array of virtqueue indices that will be handled by this
#     IOThread. When absent, virtqueues are assigned round-robin across all
#     IOThreadVirtQueueMappings provided. Either all IOThreadVirtQueueMappings
#     must have @vqs or none of them must have it.
#
# Since: 9.0
##
{ 'struct': 'IOThreadVirtQueueMapping',
  'data': { 'iothread': 'str', '*vqs': ['uint16'] } }

##
# @DummyVirtioForceArrays:
#
# Not used by QMP; hack to let us use IOThreadVirtQueueMappingList internally
#
# Since: 9.0
##
{ 'struct': 'DummyVirtioForceArrays',
  'data': { 'unused-iothread-vq-mapping': ['IOThreadVirtQueueMapping'] } }
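For readers who have not worked with the QAPI code generator: a schema entry like the one above normally turns into a C struct plus a linked-list wrapper type. The sketch below shows roughly what consuming IOThreadVirtQueueMappingList looks like; the type layout follows the usual QAPI naming conventions and is an approximation, not code copied from this series.

#include <stdbool.h>

/* Approximation of the generated types (the real ones come from QAPI codegen): */
typedef struct uint16List uint16List;

typedef struct IOThreadVirtQueueMapping {
    char *iothread;     /* "iothread": id of the IOThread object */
    bool has_vqs;       /* whether the optional "vqs" member was given */
    uint16List *vqs;    /* virtqueue indices, when present */
} IOThreadVirtQueueMapping;

typedef struct IOThreadVirtQueueMappingList {
    struct IOThreadVirtQueueMappingList *next;
    IOThreadVirtQueueMapping *value;
} IOThreadVirtQueueMappingList;

/* Walking the list, e.g. to count or validate the mappings: */
static unsigned count_mappings(IOThreadVirtQueueMappingList *list)
{
    unsigned n = 0;

    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
        n++;
    }
    return n;
}
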
@ -960,7 +960,6 @@ static int img_commit(int argc, char **argv)
|
|||
Error *local_err = NULL;
|
||||
CommonBlockJobCBInfo cbi;
|
||||
bool image_opts = false;
|
||||
AioContext *aio_context;
|
||||
int64_t rate_limit = 0;
|
||||
|
||||
fmt = NULL;
|
||||
|
@ -1078,12 +1077,9 @@ static int img_commit(int argc, char **argv)
|
|||
.bs = bs,
|
||||
};
|
||||
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
aio_context_acquire(aio_context);
|
||||
commit_active_start("commit", bs, base_bs, JOB_DEFAULT, rate_limit,
|
||||
BLOCKDEV_ON_ERROR_REPORT, NULL, common_block_job_cb,
|
||||
&cbi, false, &local_err);
|
||||
aio_context_release(aio_context);
|
||||
if (local_err) {
|
||||
goto done;
|
||||
}
|
||||
|
|
 qemu-io.c | 10
|
@@ -414,15 +414,7 @@ static void prep_fetchline(void *opaque)

static int do_qemuio_command(const char *cmd)
{
    int ret;
    AioContext *ctx =
        qemuio_blk ? blk_get_aio_context(qemuio_blk) : qemu_get_aio_context();

    aio_context_acquire(ctx);
    ret = qemuio_command(qemuio_blk, cmd);
    aio_context_release(ctx);

    return ret;
    return qemuio_command(qemuio_blk, cmd);
}

static int command_loop(void)
|
@ -1123,9 +1123,7 @@ int main(int argc, char **argv)
|
|||
qdict_put_str(raw_opts, "file", bs->node_name);
|
||||
qdict_put_int(raw_opts, "offset", dev_offset);
|
||||
|
||||
aio_context_acquire(qemu_get_aio_context());
|
||||
bs = bdrv_open(NULL, NULL, raw_opts, flags, &error_fatal);
|
||||
aio_context_release(qemu_get_aio_context());
|
||||
|
||||
blk_remove_bs(blk);
|
||||
blk_insert_bs(blk, bs, &error_fatal);
|
||||
|
|
|
@ -144,7 +144,6 @@ static char *replay_find_nearest_snapshot(int64_t icount,
|
|||
char *ret = NULL;
|
||||
int rv;
|
||||
int nb_sns, i;
|
||||
AioContext *aio_context;
|
||||
|
||||
*snapshot_icount = -1;
|
||||
|
||||
|
@ -152,11 +151,8 @@ static char *replay_find_nearest_snapshot(int64_t icount,
|
|||
if (!bs) {
|
||||
goto fail;
|
||||
}
|
||||
aio_context = bdrv_get_aio_context(bs);
|
||||
|
||||
aio_context_acquire(aio_context);
|
||||
nb_sns = bdrv_snapshot_list(bs, &sn_tab);
|
||||
aio_context_release(aio_context);
|
||||
|
||||
for (i = 0; i < nb_sns; i++) {
|
||||
rv = bdrv_all_has_snapshot(sn_tab[i].name, false, NULL, NULL);
|
||||
|
|
|
@ -92,8 +92,6 @@ class FuncDecl:
|
|||
f"{self.name}")
|
||||
self.target_name = f'{subsystem}_{subname}'
|
||||
|
||||
self.ctx = self.gen_ctx()
|
||||
|
||||
self.get_result = 's->ret = '
|
||||
self.ret = 'return s.ret;'
|
||||
self.co_ret = 'return '
|
||||
|
@ -167,7 +165,7 @@ def create_mixed_wrapper(func: FuncDecl) -> str:
|
|||
{func.co_ret}{name}({ func.gen_list('{name}') });
|
||||
}} else {{
|
||||
{struct_name} s = {{
|
||||
.poll_state.ctx = {func.ctx},
|
||||
.poll_state.ctx = qemu_get_current_aio_context(),
|
||||
.poll_state.in_progress = true,
|
||||
|
||||
{ func.gen_block(' .{name} = {name},') }
|
||||
|
@ -191,7 +189,7 @@ def create_co_wrapper(func: FuncDecl) -> str:
|
|||
{func.return_type} {func.name}({ func.gen_list('{decl}') })
|
||||
{{
|
||||
{struct_name} s = {{
|
||||
.poll_state.ctx = {func.ctx},
|
||||
.poll_state.ctx = qemu_get_current_aio_context(),
|
||||
.poll_state.in_progress = true,
|
||||
|
||||
{ func.gen_block(' .{name} = {name},') }
|
||||
|
@ -261,8 +259,8 @@ def gen_no_co_wrapper(func: FuncDecl) -> str:
|
|||
graph_lock=' bdrv_graph_rdlock_main_loop();'
|
||||
graph_unlock=' bdrv_graph_rdunlock_main_loop();'
|
||||
elif func.graph_wrlock:
|
||||
graph_lock=' bdrv_graph_wrlock(NULL);'
|
||||
graph_unlock=' bdrv_graph_wrunlock(NULL);'
|
||||
graph_lock=' bdrv_graph_wrlock();'
|
||||
graph_unlock=' bdrv_graph_wrunlock();'
|
||||
|
||||
return f"""\
|
||||
/*
|
||||
|
@ -278,12 +276,9 @@ typedef struct {struct_name} {{
|
|||
static void {name}_bh(void *opaque)
|
||||
{{
|
||||
{struct_name} *s = opaque;
|
||||
AioContext *ctx = {func.gen_ctx('s->')};
|
||||
|
||||
{graph_lock}
|
||||
aio_context_acquire(ctx);
|
||||
{func.get_result}{name}({ func.gen_list('s->{name}') });
|
||||
aio_context_release(ctx);
|
||||
{graph_unlock}
|
||||
|
||||
aio_co_wake(s->co);
|
||||
|
|
|
@@ -119,13 +119,15 @@ static void dma_blk_cb(void *opaque, int ret)

    trace_dma_blk_cb(dbs, ret);

    aio_context_acquire(ctx);
    /* DMAAIOCB is not thread-safe and must be accessed only from dbs->ctx */
    assert(ctx == qemu_get_current_aio_context());

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        goto out;
        return;
    }
    dma_blk_unmap(dbs);

@@ -168,7 +170,7 @@ static void dma_blk_cb(void *opaque, int ret)
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        goto out;
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {

@@ -179,8 +181,6 @@ static void dma_blk_cb(void *opaque, int ret)
    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    assert(dbs->acb);
out:
    aio_context_release(ctx);
}

static void dma_aio_cancel(BlockAIOCB *acb)
||||
static void dma_aio_cancel(BlockAIOCB *acb)
|
||||
|
|
|
@ -2426,6 +2426,10 @@ static void qemu_validate_options(const QDict *machine_opts)
|
|||
}
|
||||
}
|
||||
|
||||
if (loadvm && incoming) {
|
||||
error_report("'incoming' and 'loadvm' options are mutually exclusive");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
if (loadvm && preconfig_requested) {
|
||||
error_report("'preconfig' and 'loadvm' options are "
|
||||
"mutually exclusive");
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
# Check that QMP 'transaction' blockdev-snapshot-sync with multiple drives on a
|
||||
# single IOThread completes successfully. This particular command triggered a
|
||||
# hang due to recursive AioContext locking and BDRV_POLL_WHILE(). Protect
|
||||
# against regressions.
|
||||
# against regressions even though the AioContext lock no longer exists.
|
||||
|
||||
import iotests
|
||||
|
||||
|
|
|
@ -21,7 +21,8 @@
|
|||
# Check that QMP 'migrate' with multiple drives on a single IOThread completes
|
||||
# successfully. This particular command triggered a hang in the source QEMU
|
||||
# process due to recursive AioContext locking in bdrv_invalidate_all() and
|
||||
# BDRV_POLL_WHILE().
|
||||
# BDRV_POLL_WHILE(). Protect against regressions even though the AioContext
|
||||
# lock no longer exists.
|
||||
|
||||
import iotests
|
||||
|
||||
|
|
|
@@ -0,0 +1,170 @@
#!/usr/bin/env bash
# group: rw quick
#
# Test case for internal snapshots in qcow2
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

# creator
owner=kwolf@redhat.com

seq="$(basename $0)"
echo "QA output created by $seq"

status=1 # failure is the default!

_cleanup()
{
_cleanup_test_img
}
trap "_cleanup; exit \$status" 0 1 2 3 15

# get standard environment, filters and checks
. ../common.rc
. ../common.filter

# This tests qcow2-specific low-level functionality
_supported_fmt qcow2
_supported_proto generic
# Internal snapshots are (currently) impossible with refcount_bits=1,
# and generally impossible with external data files
_unsupported_imgopts 'compat=0.10' 'refcount_bits=1[^0-9]' data_file

IMG_SIZE=64M

_qemu()
{
$QEMU -no-shutdown -nographic -monitor stdio -serial none \
-blockdev file,filename="$TEST_IMG",node-name=disk0-file \
-blockdev "$IMGFMT",file=disk0-file,node-name=disk0 \
-object iothread,id=iothread0 \
-device virtio-scsi,iothread=iothread0 \
-device scsi-hd,drive=disk0,share-rw=on \
"$@" 2>&1 |\
_filter_qemu | _filter_hmp | _filter_qemu_io
}

_make_test_img $IMG_SIZE

echo
echo "=== Write some data, take a snapshot and overwrite part of it ==="
echo

{
echo 'qemu-io disk0 "write -P0x11 0 1M"'
# Give qemu some time to boot before saving the VM state
sleep 0.5
echo "savevm snap0"
echo 'qemu-io disk0 "write -P0x22 0 512k"'
echo "quit"
} | _qemu

echo
$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size
_check_test_img

echo
echo "=== Verify that loading the snapshot reverts to the old content ==="
echo

{
# -loadvm reverted the write from the previous QEMU instance
echo 'qemu-io disk0 "read -P0x11 0 1M"'

# Verify that it works without restarting QEMU, too
echo 'qemu-io disk0 "write -P0x33 512k 512k"'
echo "loadvm snap0"
echo 'qemu-io disk0 "read -P0x11 0 1M"'

# Verify COW by writing a partial cluster
echo 'qemu-io disk0 "write -P0x33 63k 2k"'
echo 'qemu-io disk0 "read -P0x11 0 63k"'
echo 'qemu-io disk0 "read -P0x33 63k 2k"'
echo 'qemu-io disk0 "read -P0x11 65k 63k"'

# Take a second snapshot
echo "savevm snap1"

echo "quit"
} | _qemu -loadvm snap0

echo
$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size
_check_test_img

echo
echo "=== qemu-img snapshot can revert to snapshots ==="
echo

$QEMU_IMG snapshot -a snap0 "$TEST_IMG"
$QEMU_IO -c "read -P0x11 0 1M" "$TEST_IMG" | _filter_qemu_io
$QEMU_IMG snapshot -a snap1 "$TEST_IMG"
$QEMU_IO \
-c "read -P0x11 0 63k" \
-c "read -P0x33 63k 2k" \
-c "read -P0x11 65k 63k" \
"$TEST_IMG" | _filter_qemu_io

echo
echo "=== Deleting snapshots ==="
echo
{
# The active layer stays unaffected by deleting the snapshot
echo "delvm snap1"
echo 'qemu-io disk0 "read -P0x11 0 63k"'
echo 'qemu-io disk0 "read -P0x33 63k 2k"'
echo 'qemu-io disk0 "read -P0x11 65k 63k"'

echo "quit"
} | _qemu

echo
$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size
_check_test_img

echo
echo "=== Error cases ==="
echo

# snap1 should not exist any more
_qemu -loadvm snap1

echo
{
echo "loadvm snap1"
echo "quit"
} | _qemu

# Snapshot operations and inactive images are incompatible
echo
_qemu -loadvm snap0 -incoming defer
{
echo "loadvm snap0"
echo "delvm snap0"
echo "savevm snap1"
echo "quit"
} | _qemu -incoming defer

# -loadvm and -preconfig are incompatible
echo
_qemu -loadvm snap0 -preconfig

# success, all done
echo "*** done"
rm -f $seq.full
status=0

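For reference, a minimal sketch of how this test would be invoked through the qemu-iotests harness (assuming a configured build tree and the standard ./check runner; the exact path is not part of this diff):

    # run only the new test against the qcow2 format
    cd build/tests/qemu-iotests
    ./check -qcow2 qcow2-internal-snapshots
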
@@ -0,0 +1,107 @@
QA output created by qcow2-internal-snapshots
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864

=== Write some data, take a snapshot and overwrite part of it ===

QEMU X.Y.Z monitor - type 'help' for more information
(qemu) qemu-io disk0 "write -P0x11 0 1M"
wrote 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) savevm snap0
(qemu) qemu-io disk0 "write -P0x22 0 512k"
wrote 524288/524288 bytes at offset 0
512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) quit

Snapshot list:
ID TAG VM SIZE DATE VM CLOCK ICOUNT
1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
No errors were found on the image.

=== Verify that loading the snapshot reverts to the old content ===

QEMU X.Y.Z monitor - type 'help' for more information
(qemu) qemu-io disk0 "read -P0x11 0 1M"
read 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qemu-io disk0 "write -P0x33 512k 512k"
wrote 524288/524288 bytes at offset 524288
512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) loadvm snap0
(qemu) qemu-io disk0 "read -P0x11 0 1M"
read 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qemu-io disk0 "write -P0x33 63k 2k"
wrote 2048/2048 bytes at offset 64512
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qemu-io disk0 "read -P0x11 0 63k"
read 64512/64512 bytes at offset 0
63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qemu-io disk0 "read -P0x33 63k 2k"
read 2048/2048 bytes at offset 64512
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qemu-io disk0 "read -P0x11 65k 63k"
read 64512/64512 bytes at offset 66560
63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) savevm snap1
(qemu) quit

Snapshot list:
ID TAG VM SIZE DATE VM CLOCK ICOUNT
1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
2 snap1 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
No errors were found on the image.

=== qemu-img snapshot can revert to snapshots ===

read 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 64512/64512 bytes at offset 0
63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 2048/2048 bytes at offset 64512
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
read 64512/64512 bytes at offset 66560
63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)

=== Deleting snapshots ===

QEMU X.Y.Z monitor - type 'help' for more information
(qemu) delvm snap1
(qemu) qemu-io disk0 "read -P0x11 0 63k"
read 64512/64512 bytes at offset 0
63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qemu-io disk0 "read -P0x33 63k 2k"
read 2048/2048 bytes at offset 64512
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qemu-io disk0 "read -P0x11 65k 63k"
read 64512/64512 bytes at offset 66560
63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) quit

Snapshot list:
ID TAG VM SIZE DATE VM CLOCK ICOUNT
1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000
No errors were found on the image.

=== Error cases ===

QEMU X.Y.Z monitor - type 'help' for more information
(qemu) QEMU_PROG: Snapshot 'snap1' does not exist in one or more devices

QEMU X.Y.Z monitor - type 'help' for more information
(qemu) loadvm snap1
Error: Snapshot 'snap1' does not exist in one or more devices
(qemu) quit

QEMU_PROG: 'incoming' and 'loadvm' options are mutually exclusive
QEMU X.Y.Z monitor - type 'help' for more information
(qemu) loadvm snap0
Error: Device 'disk0' is writable but does not support snapshots
(qemu) delvm snap0
Error: Device 'disk0' is writable but does not support snapshots
(qemu) savevm snap1
Error: Device 'disk0' is writable but does not support snapshots
(qemu) quit

QEMU_PROG: 'preconfig' and 'loadvm' options are mutually exclusive
*** done

@@ -4,7 +4,6 @@

# TSan reports a double lock on RECURSIVE mutexes.
# Since the recursive lock is intentional, we choose to ignore it.
mutex:aio_context_acquire
mutex:pthread_mutex_lock

# TSan reports a race between pthread_mutex_init() and

@@ -100,76 +100,12 @@ static void event_ready_cb(EventNotifier *e)

/* Tests using aio_*. */

typedef struct {
QemuMutex start_lock;
EventNotifier notifier;
bool thread_acquired;
} AcquireTestData;

static void *test_acquire_thread(void *opaque)
{
AcquireTestData *data = opaque;

/* Wait for other thread to let us start */
qemu_mutex_lock(&data->start_lock);
qemu_mutex_unlock(&data->start_lock);

/* event_notifier_set might be called either before or after
* the main thread's call to poll(). The test case's outcome
* should be the same in either case.
*/
event_notifier_set(&data->notifier);
aio_context_acquire(ctx);
aio_context_release(ctx);

data->thread_acquired = true; /* success, we got here */

return NULL;
}

static void set_event_notifier(AioContext *nctx, EventNotifier *notifier,
EventNotifierHandler *handler)
{
aio_set_event_notifier(nctx, notifier, handler, NULL, NULL);
}

static void dummy_notifier_read(EventNotifier *n)
{
event_notifier_test_and_clear(n);
}

static void test_acquire(void)
{
QemuThread thread;
AcquireTestData data;

/* Dummy event notifier ensures aio_poll() will block */
event_notifier_init(&data.notifier, false);
set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

qemu_mutex_init(&data.start_lock);
qemu_mutex_lock(&data.start_lock);
data.thread_acquired = false;

qemu_thread_create(&thread, "test_acquire_thread",
test_acquire_thread,
&data, QEMU_THREAD_JOINABLE);

/* Block in aio_poll(), let other thread kick us and acquire context */
aio_context_acquire(ctx);
qemu_mutex_unlock(&data.start_lock); /* let the thread run */
g_assert(aio_poll(ctx, true));
g_assert(!data.thread_acquired);
aio_context_release(ctx);

qemu_thread_join(&thread);
set_event_notifier(ctx, &data.notifier, NULL);
event_notifier_cleanup(&data.notifier);

g_assert(data.thread_acquired);
}

static void test_bh_schedule(void)
{
BHTestData data = { .n = 0 };

@@ -879,7 +815,7 @@ static void test_worker_thread_co_enter(void)
qemu_thread_get_self(&this_thread);
co = qemu_coroutine_create(co_check_current_thread, &this_thread);

qemu_thread_create(&worker_thread, "test_acquire_thread",
qemu_thread_create(&worker_thread, "test_aio_co_enter",
test_aio_co_enter,
co, QEMU_THREAD_JOINABLE);

@@ -899,7 +835,6 @@ int main(int argc, char **argv)
while (g_main_context_iteration(NULL, false));

g_test_init(&argc, &argv, NULL);
g_test_add_func("/aio/acquire", test_acquire);
g_test_add_func("/aio/bh/schedule", test_bh_schedule);
g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
g_test_add_func("/aio/bh/cancel", test_bh_cancel);

@@ -179,13 +179,7 @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)

static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
if (drain_type != BDRV_DRAIN_ALL) {
aio_context_acquire(bdrv_get_aio_context(bs));
}
do_drain_begin(drain_type, bs);
if (drain_type != BDRV_DRAIN_ALL) {
aio_context_release(bdrv_get_aio_context(bs));
}
}

static BlockBackend * no_coroutine_fn test_setup(void)

@@ -209,13 +203,7 @@ static BlockBackend * no_coroutine_fn test_setup(void)

static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
if (drain_type != BDRV_DRAIN_ALL) {
aio_context_acquire(bdrv_get_aio_context(bs));
}
do_drain_end(drain_type, bs);
if (drain_type != BDRV_DRAIN_ALL) {
aio_context_release(bdrv_get_aio_context(bs));
}
}

/*

@@ -520,12 +508,8 @@ static void test_iothread_main_thread_bh(void *opaque)
{
struct test_iothread_data *data = opaque;

/* Test that the AioContext is not yet locked in a random BH that is
* executed during drain, otherwise this would deadlock. */
aio_context_acquire(bdrv_get_aio_context(data->bs));
bdrv_flush(data->bs);
bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
aio_context_release(bdrv_get_aio_context(data->bs));
}

/*

@@ -567,7 +551,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
blk_set_disable_request_queuing(blk, true);

blk_set_aio_context(blk, ctx_a, &error_abort);
aio_context_acquire(ctx_a);

s->bh_indirection_ctx = ctx_b;

@@ -582,8 +565,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
g_assert(acb != NULL);
g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

aio_context_release(ctx_a);

data = (struct test_iothread_data) {
.bs = bs,
.drain_type = drain_type,

@@ -592,10 +573,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)

switch (drain_thread) {
case 0:
if (drain_type != BDRV_DRAIN_ALL) {
aio_context_acquire(ctx_a);
}

/*
* Increment in_flight so that do_drain_begin() waits for
* test_iothread_main_thread_bh(). This prevents the race between

@@ -613,20 +590,10 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
do_drain_begin(drain_type, bs);
g_assert_cmpint(bs->in_flight, ==, 0);

if (drain_type != BDRV_DRAIN_ALL) {
aio_context_release(ctx_a);
}
qemu_event_wait(&done_event);
if (drain_type != BDRV_DRAIN_ALL) {
aio_context_acquire(ctx_a);
}

g_assert_cmpint(aio_ret, ==, 0);
do_drain_end(drain_type, bs);

if (drain_type != BDRV_DRAIN_ALL) {
aio_context_release(ctx_a);
}
break;
case 1:
co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);

@@ -637,9 +604,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
g_assert_not_reached();
}

aio_context_acquire(ctx_a);
blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
aio_context_release(ctx_a);

bdrv_unref(bs);
blk_unref(blk);

@@ -757,7 +722,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
BlockJob *job;
TestBlockJob *tjob;
IOThread *iothread = NULL;
AioContext *ctx;
int ret;

src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,

@@ -787,11 +751,11 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
}

if (use_iothread) {
AioContext *ctx;

iothread = iothread_new();
ctx = iothread_get_aio_context(iothread);
blk_set_aio_context(blk_src, ctx, &error_abort);
} else {
ctx = qemu_get_aio_context();
}

target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,

@@ -800,16 +764,15 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
blk_insert_bs(blk_target, target, &error_abort);
blk_set_allow_aio_context_change(blk_target, true);

aio_context_acquire(ctx);
tjob = block_job_create("job0", &test_job_driver, NULL, src,
0, BLK_PERM_ALL,
0, 0, NULL, NULL, &error_abort);
tjob->bs = src;
job = &tjob->common;

bdrv_graph_wrlock(target);
bdrv_graph_wrlock();
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
bdrv_graph_wrunlock(target);
bdrv_graph_wrunlock();

switch (result) {
case TEST_JOB_SUCCESS:

@@ -821,7 +784,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
tjob->prepare_ret = -EIO;
break;
}
aio_context_release(ctx);

job_start(&job->job);

@@ -912,12 +874,10 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
}
g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

aio_context_acquire(ctx);
if (use_iothread) {
blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
}
aio_context_release(ctx);

blk_unref(blk_src);
blk_unref(blk_target);

@@ -991,11 +951,11 @@ static void bdrv_test_top_close(BlockDriverState *bs)
{
BdrvChild *c, *next_c;

bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
bdrv_unref_child(bs, c);
}
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
}

static int coroutine_fn GRAPH_RDLOCK

@@ -1085,10 +1045,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,

null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

/* This child will be the one to pass to requests through to, and
* it will stall until a drain occurs */

@@ -1096,21 +1056,21 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
&error_abort);
child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
/* Takes our reference to child_bs */
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
&child_of_bds,
BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
&error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

/* This child is just there to be deleted
* (for detach_instead_of_delete == true) */
null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
&error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
blk_insert_bs(blk, bs, &error_abort);

@@ -1193,14 +1153,14 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque)

bdrv_dec_in_flight(data->child_b->bs);

bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_unref_child(data->parent_b, data->child_b);

bdrv_ref(data->c);
data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
&child_of_bds, BDRV_CHILD_DATA,
&error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
}

static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)

@@ -1298,7 +1258,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
/* Set child relationships */
bdrv_ref(b);
bdrv_ref(a);
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,

@@ -1308,7 +1268,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
bdrv_attach_child(parent_a, a, "PA-A",
by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

g_assert_cmpint(parent_a->refcnt, ==, 1);
g_assert_cmpint(parent_b->refcnt, ==, 1);

@@ -1401,9 +1361,7 @@ static void test_append_to_drained(void)
g_assert_cmpint(base_s->drain_count, ==, 1);
g_assert_cmpint(base->in_flight, ==, 0);

aio_context_acquire(qemu_get_aio_context());
bdrv_append(overlay, base, &error_abort);
aio_context_release(qemu_get_aio_context());

g_assert_cmpint(base->in_flight, ==, 0);
g_assert_cmpint(overlay->in_flight, ==, 0);

@@ -1438,16 +1396,11 @@ static void test_set_aio_context(void)

bdrv_drained_begin(bs);
bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);

aio_context_acquire(ctx_a);
bdrv_drained_end(bs);

bdrv_drained_begin(bs);
bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
aio_context_release(ctx_a);
aio_context_acquire(ctx_b);
bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
aio_context_release(ctx_b);
bdrv_drained_end(bs);

bdrv_unref(bs);

@@ -1727,7 +1680,7 @@ static void test_drop_intermediate_poll(void)
* Establish the chain last, so the chain links are the first
* elements in the BDS.parents lists
*/
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
for (i = 0; i < 3; i++) {
if (i) {
/* Takes the reference to chain[i - 1] */

@@ -1735,7 +1688,7 @@ static void test_drop_intermediate_poll(void)
&chain_child_class, BDRV_CHILD_COW, &error_abort);
}
}
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);

@@ -1982,10 +1935,10 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
new_child_bs->total_sectors = 1;

bdrv_ref(old_child_bs);
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
BDRV_CHILD_COW, &error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
parent_s->setup_completed = true;

for (i = 0; i < old_drain_count; i++) {

@@ -2016,9 +1969,9 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
g_assert(parent_bs->quiesce_counter == old_drain_count);
bdrv_drained_begin(old_child_bs);
bdrv_drained_begin(new_child_bs);
bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();
bdrv_drained_end(new_child_bs);
bdrv_drained_end(old_child_bs);
g_assert(parent_bs->quiesce_counter == new_drain_count);

@@ -137,15 +137,13 @@ static void test_update_perm_tree(void)

blk_insert_bs(root, bs, &error_abort);

bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_attach_child(filter, bs, "child", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

aio_context_acquire(qemu_get_aio_context());
ret = bdrv_append(filter, bs, NULL);
g_assert_cmpint(ret, <, 0);
aio_context_release(qemu_get_aio_context());

bdrv_unref(filter);
blk_unref(root);

@@ -206,14 +204,12 @@ static void test_should_update_child(void)

bdrv_set_backing_hd(target, bs, &error_abort);

bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
g_assert(target->backing->bs == bs);
bdrv_attach_child(filter, target, "target", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
bdrv_graph_wrunlock(NULL);
aio_context_acquire(qemu_get_aio_context());
bdrv_graph_wrunlock();
bdrv_append(filter, bs, &error_abort);
aio_context_release(qemu_get_aio_context());

bdrv_graph_rdlock_main_loop();
g_assert(target->backing->bs == bs);

@@ -248,7 +244,7 @@ static void test_parallel_exclusive_write(void)
bdrv_ref(base);
bdrv_ref(fl1);

bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_attach_child(top, fl1, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);

@@ -260,7 +256,7 @@ static void test_parallel_exclusive_write(void)
&error_abort);

bdrv_replace_node(fl1, fl2, &error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

bdrv_drained_end(fl2);
bdrv_drained_end(fl1);

@@ -367,7 +363,7 @@ static void test_parallel_perm_update(void)
*/
bdrv_ref(base);

bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA,
&error_abort);
c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds,

@@ -380,7 +376,7 @@ static void test_parallel_perm_update(void)
bdrv_attach_child(fl2, base, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

/* Select fl1 as first child to be active */
s->selected = c_fl1;

@@ -434,15 +430,13 @@ static void test_append_greedy_filter(void)
BlockDriverState *base = no_perm_node("base");
BlockDriverState *fl = exclusive_writer_node("fl1");

bdrv_graph_wrlock(NULL);
bdrv_graph_wrlock();
bdrv_attach_child(top, base, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
bdrv_graph_wrunlock(NULL);
bdrv_graph_wrunlock();

aio_context_acquire(qemu_get_aio_context());
bdrv_append(fl, base, &error_abort);
aio_context_release(qemu_get_aio_context());
bdrv_unref(fl);
bdrv_unref(top);
}

@@ -483,7 +483,6 @@ static void test_sync_op(const void *opaque)
bdrv_graph_rdunlock_main_loop();

blk_set_aio_context(blk, ctx, &error_abort);
aio_context_acquire(ctx);
if (t->fn) {
t->fn(c);
}

@@ -491,7 +490,6 @@ static void test_sync_op(const void *opaque)
t->blkfn(blk);
}
blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
aio_context_release(ctx);

bdrv_unref(bs);
blk_unref(blk);

@@ -576,9 +574,7 @@ static void test_attach_blockjob(void)
aio_poll(qemu_get_aio_context(), false);
}

aio_context_acquire(ctx);
blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
aio_context_release(ctx);

tjob->n = 0;
while (tjob->n == 0) {

@@ -595,9 +591,7 @@ static void test_attach_blockjob(void)
WITH_JOB_LOCK_GUARD() {
job_complete_sync_locked(&tjob->common.job, &error_abort);
}
aio_context_acquire(ctx);
blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
aio_context_release(ctx);

bdrv_unref(bs);
blk_unref(blk);

@@ -654,9 +648,7 @@ static void test_propagate_basic(void)

/* Switch the AioContext back */
main_ctx = qemu_get_aio_context();
aio_context_acquire(ctx);
blk_set_aio_context(blk, main_ctx, &error_abort);
aio_context_release(ctx);
g_assert(blk_get_aio_context(blk) == main_ctx);
g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);

@@ -732,9 +724,7 @@ static void test_propagate_diamond(void)

/* Switch the AioContext back */
main_ctx = qemu_get_aio_context();
aio_context_acquire(ctx);
blk_set_aio_context(blk, main_ctx, &error_abort);
aio_context_release(ctx);
g_assert(blk_get_aio_context(blk) == main_ctx);
g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
g_assert(bdrv_get_aio_context(bs_a) == main_ctx);

@@ -764,13 +754,11 @@ static void test_propagate_mirror(void)
&error_abort);

/* Start a mirror job */
aio_context_acquire(main_ctx);
mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
&error_abort);
aio_context_release(main_ctx);

WITH_JOB_LOCK_GUARD() {
job = job_get_locked("job0");

@@ -785,9 +773,7 @@ static void test_propagate_mirror(void)
g_assert(job->aio_context == ctx);

/* Change the AioContext of target */
aio_context_acquire(ctx);
bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
aio_context_release(ctx);
g_assert(bdrv_get_aio_context(src) == main_ctx);
g_assert(bdrv_get_aio_context(target) == main_ctx);
g_assert(bdrv_get_aio_context(filter) == main_ctx);

@@ -805,10 +791,8 @@ static void test_propagate_mirror(void)
g_assert(bdrv_get_aio_context(filter) == main_ctx);

/* ...unless we explicitly allow it */
aio_context_acquire(ctx);
blk_set_allow_aio_context_change(blk, true);
bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);
aio_context_release(ctx);

g_assert(blk_get_aio_context(blk) == ctx);
g_assert(bdrv_get_aio_context(src) == ctx);

@@ -817,10 +801,8 @@ static void test_propagate_mirror(void)

job_cancel_sync_all();

aio_context_acquire(ctx);
blk_set_aio_context(blk, main_ctx, &error_abort);
bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
aio_context_release(ctx);

blk_unref(blk);
bdrv_unref(src);

@@ -836,7 +818,6 @@ static void test_attach_second_node(void)
BlockDriverState *bs, *filter;
QDict *options;

aio_context_acquire(main_ctx);
blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
blk_insert_bs(blk, bs, &error_abort);

@@ -846,15 +827,12 @@ static void test_attach_second_node(void)
qdict_put_str(options, "file", "base");

filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
aio_context_release(main_ctx);

g_assert(blk_get_aio_context(blk) == ctx);
g_assert(bdrv_get_aio_context(bs) == ctx);
g_assert(bdrv_get_aio_context(filter) == ctx);

aio_context_acquire(ctx);
blk_set_aio_context(blk, main_ctx, &error_abort);
aio_context_release(ctx);
g_assert(blk_get_aio_context(blk) == main_ctx);
g_assert(bdrv_get_aio_context(bs) == main_ctx);
g_assert(bdrv_get_aio_context(filter) == main_ctx);

@@ -868,11 +846,9 @@ static void test_attach_preserve_blk_ctx(void)
{
IOThread *iothread = iothread_new();
AioContext *ctx = iothread_get_aio_context(iothread);
AioContext *main_ctx = qemu_get_aio_context();
BlockBackend *blk;
BlockDriverState *bs;

aio_context_acquire(main_ctx);
blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

@@ -881,25 +857,18 @@ static void test_attach_preserve_blk_ctx(void)
blk_insert_bs(blk, bs, &error_abort);
g_assert(blk_get_aio_context(blk) == ctx);
g_assert(bdrv_get_aio_context(bs) == ctx);
aio_context_release(main_ctx);

/* Remove the node again */
aio_context_acquire(ctx);
blk_remove_bs(blk);
aio_context_release(ctx);
g_assert(blk_get_aio_context(blk) == ctx);
g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

/* Re-attach the node */
aio_context_acquire(main_ctx);
blk_insert_bs(blk, bs, &error_abort);
aio_context_release(main_ctx);
g_assert(blk_get_aio_context(blk) == ctx);
g_assert(bdrv_get_aio_context(bs) == ctx);

aio_context_acquire(ctx);
blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
aio_context_release(ctx);
bdrv_unref(bs);
blk_unref(blk);
}

@@ -228,7 +228,6 @@ static void cancel_common(CancelJob *s)
BlockJob *job = &s->common;
BlockBackend *blk = s->blk;
JobStatus sts = job->job.status;
AioContext *ctx = job->job.aio_context;

job_cancel_sync(&job->job, true);
WITH_JOB_LOCK_GUARD() {

@@ -240,9 +239,7 @@ static void cancel_common(CancelJob *s)
job_unref_locked(&job->job);
}

aio_context_acquire(ctx);
destroy_blk(blk);
aio_context_release(ctx);

}

@@ -391,132 +388,6 @@ static void test_cancel_concluded(void)
cancel_common(s);
}

/* (See test_yielding_driver for the job description) */
typedef struct YieldingJob {
BlockJob common;
bool should_complete;
} YieldingJob;

static void yielding_job_complete(Job *job, Error **errp)
{
YieldingJob *s = container_of(job, YieldingJob, common.job);
s->should_complete = true;
job_enter(job);
}

static int coroutine_fn yielding_job_run(Job *job, Error **errp)
{
YieldingJob *s = container_of(job, YieldingJob, common.job);

job_transition_to_ready(job);

while (!s->should_complete) {
job_yield(job);
}

return 0;
}

/*
* This job transitions immediately to the READY state, and then
* yields until it is to complete.
*/
static const BlockJobDriver test_yielding_driver = {
.job_driver = {
.instance_size = sizeof(YieldingJob),
.free = block_job_free,
.user_resume = block_job_user_resume,
.run = yielding_job_run,
.complete = yielding_job_complete,
},
};

/*
* Test that job_complete_locked() works even on jobs that are in a paused
* state (i.e., STANDBY).
*
* To do this, run YieldingJob in an IO thread, get it into the READY
* state, then have a drained section. Before ending the section,
* acquire the context so the job will not be entered and will thus
* remain on STANDBY.
*
* job_complete_locked() should still work without error.
*
* Note that on the QMP interface, it is impossible to lock an IO
* thread before a drained section ends. In practice, the
* bdrv_drain_all_end() and the aio_context_acquire() will be
* reversed. However, that makes for worse reproducibility here:
* Sometimes, the job would no longer be in STANDBY then but already
* be started. We cannot prevent that, because the IO thread runs
* concurrently. We can only prevent it by taking the lock before
* ending the drained section, so we do that.
*
* (You can reverse the order of operations and most of the time the
* test will pass, but sometimes the assert(status == STANDBY) will
* fail.)
*/
static void test_complete_in_standby(void)
{
BlockBackend *blk;
IOThread *iothread;
AioContext *ctx;
Job *job;
BlockJob *bjob;

/* Create a test drive, move it to an IO thread */
blk = create_blk(NULL);
iothread = iothread_new();

ctx = iothread_get_aio_context(iothread);
blk_set_aio_context(blk, ctx, &error_abort);

/* Create our test job */
bjob = mk_job(blk, "job", &test_yielding_driver, true,
JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
job = &bjob->job;
assert_job_status_is(job, JOB_STATUS_CREATED);

/* Wait for the job to become READY */
job_start(job);
/*
* Here we are waiting for the status to change, so don't bother
* protecting the read every time.
*/
AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);

/* Begin the drained section, pausing the job */
bdrv_drain_all_begin();
assert_job_status_is(job, JOB_STATUS_STANDBY);

/* Lock the IO thread to prevent the job from being run */
aio_context_acquire(ctx);
/* This will schedule the job to resume it */
bdrv_drain_all_end();
aio_context_release(ctx);

WITH_JOB_LOCK_GUARD() {
/* But the job cannot run, so it will remain on standby */
assert(job->status == JOB_STATUS_STANDBY);

/* Even though the job is on standby, this should work */
job_complete_locked(job, &error_abort);

/* The test is done now, clean up. */
job_finish_sync_locked(job, NULL, &error_abort);
assert(job->status == JOB_STATUS_PENDING);

job_finalize_locked(job, &error_abort);
assert(job->status == JOB_STATUS_CONCLUDED);

job_dismiss_locked(&job, &error_abort);
}

aio_context_acquire(ctx);
destroy_blk(blk);
aio_context_release(ctx);
iothread_join(iothread);
}

int main(int argc, char **argv)
{
qemu_init_main_loop(&error_abort);

@@ -531,13 +402,5 @@ int main(int argc, char **argv)
g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);

/*
* This test is flaky and sometimes fails in CI and otherwise:
* don't run unless user opts in via environment variable.
*/
if (getenv("QEMU_TEST_FLAKY_TESTS")) {
g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
}
return g_test_run();
}

@@ -199,17 +199,13 @@ static BlockBackend *start_primary(void)
static void teardown_primary(void)
{
BlockBackend *blk;
AioContext *ctx;

/* remove P_ID */
blk = blk_by_name(P_ID);
assert(blk);

ctx = blk_get_aio_context(blk);
aio_context_acquire(ctx);
monitor_remove_blk(blk);
blk_unref(blk);
aio_context_release(ctx);
}

static void test_primary_read(void)

@@ -345,27 +341,20 @@ static void teardown_secondary(void)
{
/* only need to destroy two BBs */
BlockBackend *blk;
AioContext *ctx;

/* remove S_LOCAL_DISK_ID */
blk = blk_by_name(S_LOCAL_DISK_ID);
assert(blk);

ctx = blk_get_aio_context(blk);
aio_context_acquire(ctx);
monitor_remove_blk(blk);
blk_unref(blk);
aio_context_release(ctx);

/* remove S_ID */
blk = blk_by_name(S_ID);
assert(blk);

ctx = blk_get_aio_context(blk);
aio_context_acquire(ctx);
monitor_remove_blk(blk);
blk_unref(blk);
aio_context_release(ctx);
}

static void test_secondary_read(void)

util/async.c (14 lines changed)
@@ -562,12 +562,10 @@ static void co_schedule_bh_cb(void *opaque)
Coroutine *co = QSLIST_FIRST(&straight);
QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
trace_aio_co_schedule_bh_cb(ctx, co);
aio_context_acquire(ctx);

/* Protected by write barrier in qemu_aio_coroutine_enter */
qatomic_set(&co->scheduled, NULL);
qemu_aio_coroutine_enter(ctx, co);
aio_context_release(ctx);
}
}

@@ -707,9 +705,7 @@ void aio_co_enter(AioContext *ctx, Coroutine *co)
assert(self != co);
QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
} else {
aio_context_acquire(ctx);
qemu_aio_coroutine_enter(ctx, co);
aio_context_release(ctx);
}
}

@@ -723,16 +719,6 @@ void aio_context_unref(AioContext *ctx)
g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)

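With aio_context_acquire()/aio_context_release() deleted here, callers no longer bracket block-layer calls with the AioContext lock; an illustrative before/after, taken from the calling pattern seen in the test changes above rather than from any single hunk:

    /* before: cross-thread calls were wrapped in the AioContext lock */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    /* after: the function is called directly and must be thread-safe itself */
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
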
@@ -360,10 +360,7 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc,

qio_channel_set_follow_coroutine_ctx(server->ioc, true);

/* Attaching the AioContext starts the vu_client_trip coroutine */
aio_context_acquire(server->ctx);
vhost_user_server_attach_aio_context(server, server->ctx);
aio_context_release(server->ctx);
}

/* server->ctx acquired by caller */