mirror of https://github.com/xemu-project/xemu.git
Block layer patches
- Fix graph lock related deadlocks with the stream job
- ahci: Fix legacy software reset
- ide/via: Fix switch between compatibility and native mode

Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging

# gpg: Signature made Tue 21 Nov 2023 06:50:34 EST
# gpg: using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg: issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin:
  hw/ide/via: implement legacy/native mode switching
  ide/via: don't attempt to set default BAR addresses
  ide/pci: introduce pci_ide_update_mode() function
  ide/ioport: move ide_portio_list[] and ide_portio_list2[] definitions to IDE core
  iotests: Test two stream jobs in a single iothread
  stream: Fix AioContext locking during bdrv_graph_wrlock()
  block: Fix deadlocks in bdrv_graph_wrunlock()
  block: Fix bdrv_graph_wrlock() call in blk_remove_bs()
  hw/ide/ahci: fix legacy software reset

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
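Most of the churn below is one mechanical API change: bdrv_graph_wrunlock() now takes the BlockDriverState whose AioContext the caller holds (NULL for none), and a new bdrv_graph_wrunlock_ctx() takes the AioContext directly, so the unlock path can temporarily drop that lock while it polls the main loop. A minimal caller sketch of the new pattern, illustrative only and not taken from the commit (it assumes the usual QEMU block-layer declarations):

/*
 * Sketch: the reworked graph write-lock pattern applied throughout this
 * series.  "bs" is whichever node's AioContext the caller currently holds;
 * example_replace_node() itself is a hypothetical caller, not QEMU code.
 */
static void example_replace_node(BlockDriverState *bs, BlockDriverState *new_bs,
                                 Error **errp)
{
    bdrv_graph_wrlock(bs);
    bdrv_replace_node(bs, new_bs, errp);   /* any graph-modifying operation */
    bdrv_graph_wrunlock(bs);               /* may temporarily release bs's
                                            * AioContext while polling
                                            * main-loop BHs */
}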
commit d50a13424e

block.c (39 lines changed)

@@ -1713,7 +1713,7 @@ open_failed:
 bdrv_unref_child(bs, bs->file);
 assert(!bs->file);
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 g_free(bs->opaque);
 bs->opaque = NULL;

@@ -3577,7 +3577,7 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
 bdrv_drained_begin(drain_bs);
 bdrv_graph_wrlock(backing_hd);
 ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(backing_hd);
 bdrv_drained_end(drain_bs);
 bdrv_unref(drain_bs);

@@ -3796,7 +3796,7 @@ BdrvChild *bdrv_open_child(const char *filename,
 child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
 errp);
 aio_context_release(ctx);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 return child;
 }

@@ -4652,7 +4652,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)

 bdrv_graph_wrlock(NULL);
 tran_commit(tran);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) {
 BlockDriverState *bs = bs_entry->state.bs;

@@ -4671,7 +4671,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 abort:
 bdrv_graph_wrlock(NULL);
 tran_abort(tran);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
 if (bs_entry->prepared) {

@@ -4857,7 +4857,7 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
 ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
 tran, errp);

-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock_ctx(ctx);

 if (old_ctx != ctx) {
 aio_context_release(ctx);

@@ -5216,7 +5216,7 @@ static void bdrv_close(BlockDriverState *bs)

 assert(!bs->backing);
 assert(!bs->file);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);

 g_free(bs->opaque);
 bs->opaque = NULL;

@@ -5511,7 +5511,7 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp)
 bdrv_drained_begin(child_bs);
 bdrv_graph_wrlock(bs);
 ret = bdrv_replace_node_common(bs, child_bs, true, true, errp);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 bdrv_drained_end(child_bs);

 return ret;

@@ -5593,7 +5593,7 @@ out:
 tran_finalize(tran, ret);

 bdrv_refresh_limits(bs_top, NULL, NULL);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs_top);

 bdrv_drained_end(bs_top);
 bdrv_drained_end(bs_new);

@@ -5631,7 +5631,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,

 tran_finalize(tran, ret);

-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(new_bs);
 bdrv_drained_end(old_bs);
 bdrv_drained_end(new_bs);
 bdrv_unref(old_bs);

@@ -5720,7 +5720,7 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
 bdrv_drained_begin(new_node_bs);
 bdrv_graph_wrlock(new_node_bs);
 ret = bdrv_replace_node(bs, new_node_bs, errp);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(new_node_bs);
 bdrv_drained_end(new_node_bs);
 bdrv_drained_end(bs);
 bdrv_unref(bs);

@@ -6015,7 +6015,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
 * That's a FIXME.
 */
 bdrv_replace_node_common(top, base, false, false, &local_err);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(base);

 if (local_err) {
 error_report_err(local_err);

@@ -6052,7 +6052,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
 goto exit;

 exit_wrlock:
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(base);
 exit:
 bdrv_drained_end(base);
 bdrv_unref(top);

@@ -7254,6 +7254,16 @@ void bdrv_unref(BlockDriverState *bs)
 }
 }

+static void bdrv_schedule_unref_bh(void *opaque)
+{
+BlockDriverState *bs = opaque;
+AioContext *ctx = bdrv_get_aio_context(bs);
+
+aio_context_acquire(ctx);
+bdrv_unref(bs);
+aio_context_release(ctx);
+}
+
 /*
 * Release a BlockDriverState reference while holding the graph write lock.
 *

@@ -7267,8 +7277,7 @@ void bdrv_schedule_unref(BlockDriverState *bs)
 if (!bs) {
 return;
 }
-aio_bh_schedule_oneshot(qemu_get_aio_context(),
-(QEMUBHFunc *) bdrv_unref, bs);
+aio_bh_schedule_oneshot(qemu_get_aio_context(), bdrv_schedule_unref_bh, bs);
 }

 struct BdrvOpBlocker {

@@ -499,7 +499,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
 bdrv_graph_wrlock(target);
 block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
 &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(target);

 return &job->common;

@@ -253,7 +253,7 @@ fail_log:
 if (ret < 0) {
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, s->log_file);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 s->log_file = NULL;
 }
 fail:

@@ -268,7 +268,7 @@ static void blk_log_writes_close(BlockDriverState *bs)
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, s->log_file);
 s->log_file = NULL;
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 }

 static int64_t coroutine_fn GRAPH_RDLOCK

@@ -154,7 +154,7 @@ static void blkverify_close(BlockDriverState *bs)
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, s->test_file);
 s->test_file = NULL;
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 }

 static int64_t coroutine_fn GRAPH_RDLOCK

@@ -882,11 +882,14 @@ BlockBackend *blk_by_public(BlockBackendPublic *public)

 /*
 * Disassociates the currently associated BlockDriverState from @blk.
+ *
+ * The caller must hold the AioContext lock for the BlockBackend.
 */
 void blk_remove_bs(BlockBackend *blk)
 {
 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
 BdrvChild *root;
+AioContext *ctx;

 GLOBAL_STATE_CODE();

@@ -916,9 +919,10 @@ void blk_remove_bs(BlockBackend *blk)
 root = blk->root;
 blk->root = NULL;

-bdrv_graph_wrlock(NULL);
+ctx = bdrv_get_aio_context(root->bs);
+bdrv_graph_wrlock(root->bs);
 bdrv_root_unref_child(root);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock_ctx(ctx);
 }

 /*

@@ -929,6 +933,8 @@ void blk_remove_bs(BlockBackend *blk)
 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 {
 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+AioContext *ctx = bdrv_get_aio_context(bs);
+
 GLOBAL_STATE_CODE();
 bdrv_ref(bs);
 bdrv_graph_wrlock(bs);

@@ -936,7 +942,7 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
 blk->perm, blk->shared_perm,
 blk, errp);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock_ctx(ctx);
 if (blk->root == NULL) {
 return -EPERM;
 }

@@ -102,7 +102,7 @@ static void commit_abort(Job *job)
 bdrv_drained_begin(commit_top_backing_bs);
 bdrv_graph_wrlock(commit_top_backing_bs);
 bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(commit_top_backing_bs);
 bdrv_drained_end(commit_top_backing_bs);

 bdrv_unref(s->commit_top_bs);

@@ -370,19 +370,19 @@ void commit_start(const char *job_id, BlockDriverState *bs,
 ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
 iter_shared_perms, errp);
 if (ret < 0) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(top);
 goto fail;
 }
 }

 if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(top);
 goto fail;
 }
 s->chain_frozen = true;

 ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(top);

 if (ret < 0) {
 goto fail;

@@ -436,7 +436,7 @@ fail:
 bdrv_drained_begin(top);
 bdrv_graph_wrlock(top);
 bdrv_replace_node(commit_top_bs, top, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(top);
 bdrv_drained_end(top);
 }
 }

@@ -161,11 +161,21 @@ void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
 }
 }

-void bdrv_graph_wrunlock(void)
+void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx)
 {
 GLOBAL_STATE_CODE();
 assert(qatomic_read(&has_writer));

+/*
+ * Release only non-mainloop AioContext. The mainloop often relies on the
+ * BQL and doesn't lock the main AioContext before doing things.
+ */
+if (ctx && ctx != qemu_get_aio_context()) {
+aio_context_release(ctx);
+} else {
+ctx = NULL;
+}
+
 WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) {
 /*
 * No need for memory barriers, this works in pair with

@@ -187,6 +197,17 @@ void bdrv_graph_wrunlock(void)
 * progress.
 */
 aio_bh_poll(qemu_get_aio_context());
+
+if (ctx) {
+aio_context_acquire(ctx);
+}
 }
+
+void no_coroutine_fn bdrv_graph_wrunlock(BlockDriverState *bs)
+{
+AioContext *ctx = bs ? bdrv_get_aio_context(bs) : NULL;
+
+bdrv_graph_wrunlock_ctx(ctx);
+}

 void coroutine_fn bdrv_graph_co_rdlock(void)
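The graph-lock.c hunks above are the heart of the deadlock fix: bdrv_graph_wrunlock_ctx() now drops a non-mainloop AioContext before polling main-loop BHs and re-acquires it afterwards. A simplified sketch of the interaction this protects against, not taken from the commit (example_drop_child() is hypothetical):

/*
 * Sketch: dropping a child while holding the graph write lock defers the
 * final unref to a main-loop BH (bdrv_schedule_unref_bh() above), and that
 * BH acquires the node's AioContext.  bdrv_graph_wrunlock() polls those BHs,
 * so it must not itself be holding that AioContext at that point.
 */
static void example_drop_child(BlockDriverState *bs, BdrvChild *child)
{
    bdrv_graph_wrlock(bs);
    bdrv_unref_child(bs, child);   /* may end up in bdrv_schedule_unref() */
    bdrv_graph_wrunlock(bs);       /* releases bs's AioContext, runs the BH,
                                    * then takes the AioContext again */
}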

@@ -773,7 +773,7 @@ static int mirror_exit_common(Job *job)
 "would not lead to an abrupt change of visible data",
 to_replace->node_name, target_bs->node_name);
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(target_bs);
 bdrv_drained_end(to_replace);
 if (local_err) {
 error_report_err(local_err);

@@ -798,7 +798,7 @@ static int mirror_exit_common(Job *job)
 block_job_remove_all_bdrv(bjob);
 bdrv_graph_wrlock(mirror_top_bs);
 bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(mirror_top_bs);

 bdrv_drained_end(target_bs);
 bdrv_unref(target_bs);

@@ -1920,7 +1920,7 @@ static BlockJob *mirror_start_job(
 BLK_PERM_CONSISTENT_READ,
 errp);
 if (ret < 0) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 goto fail;
 }

@@ -1965,17 +1965,17 @@ static BlockJob *mirror_start_job(
 ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
 iter_shared_perms, errp);
 if (ret < 0) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 goto fail;
 }
 }

 if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 goto fail;
 }
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);

 QTAILQ_INIT(&s->ops_in_flight);

@@ -2006,7 +2006,7 @@ fail:
 bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
 &error_abort);
 bdrv_replace_node(mirror_top_bs, bs, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 bdrv_drained_end(bs);

 bdrv_unref(mirror_top_bs);

@@ -2809,7 +2809,7 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file)
 bdrv_graph_rdunlock_main_loop();
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, s->data_file);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 s->data_file = NULL;
 bdrv_graph_rdlock_main_loop();
 }

@@ -1044,7 +1044,7 @@ close_exit:
 }
 bdrv_unref_child(bs, s->children[i]);
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 g_free(s->children);
 g_free(opened);
 exit:

@@ -1061,7 +1061,7 @@ static void quorum_close(BlockDriverState *bs)
 for (i = 0; i < s->num_children; i++) {
 bdrv_unref_child(bs, s->children[i]);
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 g_free(s->children);
 }

@@ -568,7 +568,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
 &local_err);
 if (local_err) {
 error_propagate(errp, local_err);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 aio_context_release(aio_context);
 return;
 }

@@ -579,7 +579,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
 BDRV_CHILD_DATA, &local_err);
 if (local_err) {
 error_propagate(errp, local_err);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 aio_context_release(aio_context);
 return;
 }

@@ -592,7 +592,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
 if (!top_bs || !bdrv_is_root_node(top_bs) ||
 !check_top_bs(top_bs, bs)) {
 error_setg(errp, "No top_bs or it is invalid");
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 reopen_backing_file(bs, false, NULL);
 aio_context_release(aio_context);
 return;

@@ -600,7 +600,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
 bdrv_op_block_all(top_bs, s->blocker);
 bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);

-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);

 s->backup_job = backup_job_create(
 NULL, s->secondary_disk->bs, s->hidden_disk->bs,

@@ -696,7 +696,7 @@ static void replication_done(void *opaque, int ret)
 s->secondary_disk = NULL;
 bdrv_unref_child(bs, s->hidden_disk);
 s->hidden_disk = NULL;
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 s->error = 0;
 } else {

@@ -292,7 +292,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
 /* .bdrv_open() will re-attach it */
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, fallback);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
 open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err);

@@ -99,9 +99,9 @@ static int stream_prepare(Job *job)
 }
 }

-bdrv_graph_wrlock(base);
+bdrv_graph_wrlock(s->target_bs);
 bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(s->target_bs);

 /*
 * This call will do I/O, so the graph can change again from here on.

@@ -369,7 +369,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
 bdrv_graph_wrlock(bs);
 if (block_job_add_bdrv(&s->common, "active node", bs, 0,
 basic_flags | BLK_PERM_WRITE, errp)) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 goto fail;
 }

@@ -389,11 +389,11 @@ void stream_start(const char *job_id, BlockDriverState *bs,
 ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
 basic_flags, errp);
 if (ret < 0) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 goto fail;
 }
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);

 s->base_overlay = base_overlay;
 s->above_base = above_base;

block/vmdk.c (10 lines changed)

@@ -283,7 +283,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
 bdrv_unref_child(bs, e->file);
 }
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 g_free(s->extents);
 }

@@ -1237,7 +1237,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
 bdrv_graph_rdunlock_main_loop();
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, extent_file);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 bdrv_graph_rdlock_main_loop();
 goto out;
 }

@@ -1256,7 +1256,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
 bdrv_graph_rdunlock_main_loop();
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, extent_file);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 bdrv_graph_rdlock_main_loop();
 goto out;
 }

@@ -1267,7 +1267,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
 bdrv_graph_rdunlock_main_loop();
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, extent_file);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 bdrv_graph_rdlock_main_loop();
 goto out;
 }

@@ -1277,7 +1277,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
 bdrv_graph_rdunlock_main_loop();
 bdrv_graph_wrlock(NULL);
 bdrv_unref_child(bs, extent_file);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 bdrv_graph_rdlock_main_loop();
 ret = -ENOTSUP;
 goto out;

@@ -1613,7 +1613,7 @@ static void external_snapshot_abort(void *opaque)
 bdrv_drained_begin(state->new_bs);
 bdrv_graph_wrlock(state->old_bs);
 bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(state->old_bs);
 bdrv_drained_end(state->new_bs);

 bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */

@@ -3692,7 +3692,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child,
 }

 out:
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 }

 BlockJobInfoList *qmp_query_block_jobs(Error **errp)

@@ -212,7 +212,7 @@ void block_job_remove_all_bdrv(BlockJob *job)

 g_slist_free_1(l);
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock_ctx(job->job.aio_context);
 }

 bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)

@@ -523,7 +523,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
 job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
 flags, cb, opaque, errp);
 if (job == NULL) {
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 return NULL;
 }

@@ -563,11 +563,11 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
 goto fail;
 }

-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 return job;

 fail:
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(bs);
 job_early_fail(&job->job);
 return NULL;
 }

@@ -623,9 +623,13 @@ static void ahci_init_d2h(AHCIDevice *ad)
 return;
 }

+/*
+ * For simplicity, do not call ahci_clear_cmd_issue() for this
+ * ahci_write_fis_d2h(). (The reset value for PxCI is 0.)
+ */
 if (ahci_write_fis_d2h(ad, true)) {
 ad->init_d2h_sent = true;
-/* We're emulating receiving the first Reg H2D Fis from the device;
+/* We're emulating receiving the first Reg D2H FIS from the device;
 * Update the SIG register, but otherwise proceed as normal. */
 pr->sig = ((uint32_t)ide_state->hcyl << 24) |
 (ide_state->lcyl << 16) |

@@ -663,6 +667,7 @@ static void ahci_reset_port(AHCIState *s, int port)
 pr->scr_act = 0;
 pr->tfdata = 0x7F;
 pr->sig = 0xFFFFFFFF;
+pr->cmd_issue = 0;
 d->busy_slot = -1;
 d->init_d2h_sent = false;

@@ -1242,10 +1247,30 @@ static void handle_reg_h2d_fis(AHCIState *s, int port,
 case STATE_RUN:
 if (cmd_fis[15] & ATA_SRST) {
 s->dev[port].port_state = STATE_RESET;
+/*
+ * When setting SRST in the first H2D FIS in the reset sequence,
+ * the device does not send a D2H FIS. Host software thus has to
+ * set the "Clear Busy upon R_OK" bit such that PxCI (and BUSY)
+ * gets cleared. See AHCI 1.3.1, section 10.4.1 Software Reset.
+ */
+if (opts & AHCI_CMD_CLR_BUSY) {
+ahci_clear_cmd_issue(ad, slot);
+}
 }
 break;
 case STATE_RESET:
 if (!(cmd_fis[15] & ATA_SRST)) {
+/*
+ * When clearing SRST in the second H2D FIS in the reset
+ * sequence, the device will execute diagnostics. When this is
+ * done, the device will send a D2H FIS with the good status.
+ * See SATA 3.5a Gold, section 11.4 Software reset protocol.
+ *
+ * This D2H FIS is the first D2H FIS received from the device,
+ * and is received regardless if the reset was performed by a
+ * COMRESET or by setting and clearing the SRST bit. Therefore,
+ * the logic for this is found in ahci_init_d2h() and not here.
+ */
 ahci_reset_port(s, port);
 }
 break;

@@ -81,6 +81,18 @@ static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)

 static void ide_dummy_transfer_stop(IDEState *s);

+const MemoryRegionPortio ide_portio_list[] = {
+{ 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
+{ 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
+{ 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
+PORTIO_END_OF_LIST(),
+};
+
+const MemoryRegionPortio ide_portio2_list[] = {
+{ 0, 1, 1, .read = ide_status_read, .write = ide_ctrl_write },
+PORTIO_END_OF_LIST(),
+};
+
 static void padstr(char *str, const char *src, int len)
 {
 int i, v;

@@ -28,18 +28,6 @@
 #include "hw/ide/internal.h"
 #include "trace.h"

-static const MemoryRegionPortio ide_portio_list[] = {
-{ 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
-{ 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
-{ 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
-PORTIO_END_OF_LIST(),
-};
-
-static const MemoryRegionPortio ide_portio2_list[] = {
-{ 0, 1, 1, .read = ide_status_read, .write = ide_ctrl_write },
-PORTIO_END_OF_LIST(),
-};
-
 int ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
 {
 int ret;

hw/ide/pci.c (84 lines changed)

@@ -104,6 +104,90 @@ const MemoryRegionOps pci_ide_data_le_ops = {
 .endianness = DEVICE_LITTLE_ENDIAN,
 };

+void pci_ide_update_mode(PCIIDEState *s)
+{
+PCIDevice *d = PCI_DEVICE(s);
+uint8_t mode = d->config[PCI_CLASS_PROG];
+
+/*
+ * This function only configures the BARs/ioports for now: PCI IDE
+ * controllers must manage their own IRQ routing
+ */
+
+switch (mode & 0xf) {
+case 0xa:
+/* Both channels legacy mode */
+
+/*
+ * TODO: according to the PCI IDE specification the BARs should
+ * be completely disabled, however Linux for the pegasos2
+ * machine stil accesses the BAR addresses after switching to legacy
+ * mode. Hence we leave them active for now.
+ */
+
+/* Clear interrupt pin */
+pci_config_set_interrupt_pin(d->config, 0);
+
+/* Add legacy IDE ports */
+if (!s->bus[0].portio_list.owner) {
+portio_list_init(&s->bus[0].portio_list, OBJECT(d),
+ide_portio_list, &s->bus[0], "ide");
+portio_list_add(&s->bus[0].portio_list,
+pci_address_space_io(d), 0x1f0);
+}
+
+if (!s->bus[0].portio2_list.owner) {
+portio_list_init(&s->bus[0].portio2_list, OBJECT(d),
+ide_portio2_list, &s->bus[0], "ide");
+portio_list_add(&s->bus[0].portio2_list,
+pci_address_space_io(d), 0x3f6);
+}
+
+if (!s->bus[1].portio_list.owner) {
+portio_list_init(&s->bus[1].portio_list, OBJECT(d),
+ide_portio_list, &s->bus[1], "ide");
+portio_list_add(&s->bus[1].portio_list,
+pci_address_space_io(d), 0x170);
+}
+
+if (!s->bus[1].portio2_list.owner) {
+portio_list_init(&s->bus[1].portio2_list, OBJECT(d),
+ide_portio2_list, &s->bus[1], "ide");
+portio_list_add(&s->bus[1].portio2_list,
+pci_address_space_io(d), 0x376);
+}
+break;
+
+case 0xf:
+/* Both channels native mode */
+
+/* Set interrupt pin */
+pci_config_set_interrupt_pin(d->config, 1);
+
+/* Remove legacy IDE ports */
+if (s->bus[0].portio_list.owner) {
+portio_list_del(&s->bus[0].portio_list);
+portio_list_destroy(&s->bus[0].portio_list);
+}
+
+if (s->bus[0].portio2_list.owner) {
+portio_list_del(&s->bus[0].portio2_list);
+portio_list_destroy(&s->bus[0].portio2_list);
+}
+
+if (s->bus[1].portio_list.owner) {
+portio_list_del(&s->bus[1].portio_list);
+portio_list_destroy(&s->bus[1].portio_list);
+}
+
+if (s->bus[1].portio2_list.owner) {
+portio_list_del(&s->bus[1].portio2_list);
+portio_list_destroy(&s->bus[1].portio2_list);
+}
+break;
+}
+}
+
 static IDEState *bmdma_active_if(BMDMAState *bmdma)
 {
 assert(bmdma->bus->retry_unit != (uint8_t)-1);

hw/ide/via.c (44 lines changed)

@@ -28,6 +28,7 @@
 #include "hw/pci/pci.h"
 #include "migration/vmstate.h"
 #include "qemu/module.h"
+#include "qemu/range.h"
 #include "sysemu/dma.h"
 #include "hw/isa/vt82c686.h"
 #include "hw/ide/pci.h"

@@ -128,16 +129,14 @@ static void via_ide_reset(DeviceState *dev)
 ide_bus_reset(&d->bus[i]);
 }

+pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy mode */
+pci_ide_update_mode(d);
+
 pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_WAIT);
 pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_FAST_BACK |
 PCI_STATUS_DEVSEL_MEDIUM);

-pci_set_long(pci_conf + PCI_BASE_ADDRESS_0, 0x000001f0);
-pci_set_long(pci_conf + PCI_BASE_ADDRESS_1, 0x000003f4);
-pci_set_long(pci_conf + PCI_BASE_ADDRESS_2, 0x00000170);
-pci_set_long(pci_conf + PCI_BASE_ADDRESS_3, 0x00000374);
-pci_set_long(pci_conf + PCI_BASE_ADDRESS_4, 0x0000cc01); /* BMIBA: 20-23h */
-pci_set_long(pci_conf + PCI_INTERRUPT_LINE, 0x0000010e);
+pci_set_byte(pci_conf + PCI_INTERRUPT_LINE, 0xe);

 /* IDE chip enable, IDE configuration 1/2, IDE FIFO Configuration*/
 pci_set_long(pci_conf + 0x40, 0x0a090600);

@@ -159,6 +158,36 @@ static void via_ide_reset(DeviceState *dev)
 pci_set_long(pci_conf + 0xc0, 0x00020001);
 }

+static uint32_t via_ide_cfg_read(PCIDevice *pd, uint32_t addr, int len)
+{
+uint32_t val = pci_default_read_config(pd, addr, len);
+uint8_t mode = pd->config[PCI_CLASS_PROG];
+
+if ((mode & 0xf) == 0xa && ranges_overlap(addr, len,
+PCI_BASE_ADDRESS_0, 16)) {
+/* BARs always read back zero in legacy mode */
+for (int i = addr; i < addr + len; i++) {
+if (i >= PCI_BASE_ADDRESS_0 && i < PCI_BASE_ADDRESS_0 + 16) {
+val &= ~(0xffULL << ((i - addr) << 3));
+}
+}
+}
+
+return val;
+}
+
+static void via_ide_cfg_write(PCIDevice *pd, uint32_t addr,
+uint32_t val, int len)
+{
+PCIIDEState *d = PCI_IDE(pd);
+
+pci_default_write_config(pd, addr, val, len);
+
+if (range_covers_byte(addr, len, PCI_CLASS_PROG)) {
+pci_ide_update_mode(d);
+}
+}
+
 static void via_ide_realize(PCIDevice *dev, Error **errp)
 {
 PCIIDEState *d = PCI_IDE(dev);

@@ -166,7 +195,6 @@ static void via_ide_realize(PCIDevice *dev, Error **errp)
 uint8_t *pci_conf = dev->config;
 int i;

-pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy mode */
 pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0);
 dev->wmask[PCI_INTERRUPT_LINE] = 0;
 dev->wmask[PCI_CLASS_PROG] = 5;

@@ -221,6 +249,8 @@ static void via_ide_class_init(ObjectClass *klass, void *data)
 /* Reason: only works as function of VIA southbridge */
 dc->user_creatable = false;

+k->config_read = via_ide_cfg_read;
+k->config_write = via_ide_cfg_write;
 k->realize = via_ide_realize;
 k->exit = via_ide_exitfn;
 k->vendor_id = PCI_VENDOR_ID_VIA;

@@ -123,8 +123,21 @@ bdrv_graph_wrlock(BlockDriverState *bs);
 * bdrv_graph_wrunlock:
 * Write finished, reset global has_writer to 0 and restart
 * all readers that are waiting.
+ *
+ * If @bs is non-NULL, its AioContext is temporarily released.
 */
-void bdrv_graph_wrunlock(void) TSA_RELEASE(graph_lock) TSA_NO_TSA;
+void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
+bdrv_graph_wrunlock(BlockDriverState *bs);
+
+/*
+ * bdrv_graph_wrunlock_ctx:
+ * Write finished, reset global has_writer to 0 and restart
+ * all readers that are waiting.
+ *
+ * If @ctx is non-NULL, its lock is temporarily released.
+ */
+void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
+bdrv_graph_wrunlock_ctx(AioContext *ctx);

 /*
 * bdrv_graph_co_rdlock:
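The header comment above distinguishes the two unlock variants. When the write-locked section may drop the last reference to the node, the node can already be gone by the time the lock is released, so the caller saves the AioContext up front and unlocks through the _ctx variant; this is the pattern used by the blk_remove_bs() hunk earlier in this series (sketch only, not commit code; "root" stands for the BdrvChild being removed):

    AioContext *ctx = bdrv_get_aio_context(root->bs);

    bdrv_graph_wrlock(root->bs);
    bdrv_root_unref_child(root);    /* root->bs may no longer exist here */
    bdrv_graph_wrunlock_ctx(ctx);   /* so unlock via the saved AioContext */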

@@ -354,6 +354,9 @@ enum ide_dma_cmd {

 extern const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT];

+extern const MemoryRegionPortio ide_portio_list[];
+extern const MemoryRegionPortio ide_portio2_list[];
+
 #define ide_cmd_is_read(s) \
 ((s)->dma_cmd == IDE_DMA_READ)

@@ -61,6 +61,7 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val);
 void bmdma_status_writeb(BMDMAState *bm, uint32_t val);
 extern MemoryRegionOps bmdma_addr_ioport_ops;
 void pci_ide_create_devs(PCIDevice *dev);
+void pci_ide_update_mode(PCIIDEState *s);

 extern const VMStateDescription vmstate_ide_pci;
 extern const MemoryRegionOps pci_ide_cmd_le_ops;

@@ -262,7 +262,7 @@ def gen_no_co_wrapper(func: FuncDecl) -> str:
 graph_unlock=' bdrv_graph_rdunlock_main_loop();'
 elif func.graph_wrlock:
 graph_lock=' bdrv_graph_wrlock(NULL);'
-graph_unlock=' bdrv_graph_wrunlock();'
+graph_unlock=' bdrv_graph_wrunlock(NULL);'

 return f"""\
 /*

@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+# group: rw quick auto
+#
+# Copyright (C) 2023 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
+
+import iotests
+
+iotests.script_initialize(supported_fmts=['qcow2'],
+                          supported_platforms=['linux'])
+iotests.verify_virtio_scsi_pci_or_ccw()
+
+with iotests.FilePath('disk1.img') as base1_path, \
+     iotests.FilePath('disk1-snap.img') as snap1_path, \
+     iotests.FilePath('disk2.img') as base2_path, \
+     iotests.FilePath('disk2-snap.img') as snap2_path, \
+     iotests.VM() as vm:
+
+    img_size = '10M'
+
+    # Only one iothread for both disks
+    vm.add_object('iothread,id=iothread0')
+    vm.add_device('virtio-scsi,iothread=iothread0')
+
+    iotests.log('Preparing disks...')
+    for i, base_path, snap_path in ((0, base1_path, snap1_path),
+                                    (1, base2_path, snap2_path)):
+        iotests.qemu_img_create('-f', iotests.imgfmt, base_path, img_size)
+        iotests.qemu_img_create('-f', iotests.imgfmt, '-b', base_path,
+                                '-F', iotests.imgfmt, snap_path)
+
+        iotests.qemu_io_log('-c', f'write 0 {img_size}', base_path)
+
+        vm.add_blockdev(f'file,node-name=disk{i}-base-file,'
+                        f'filename={base_path}')
+        vm.add_blockdev(f'qcow2,node-name=disk{i}-base,file=disk{i}-base-file')
+        vm.add_blockdev(f'file,node-name=disk{i}-file,filename={snap_path}')
+        vm.add_blockdev(f'qcow2,node-name=disk{i},file=disk{i}-file,'
+                        f'backing=disk{i}-base')
+        vm.add_device(f'scsi-hd,drive=disk{i}')
+
+    iotests.log('Launching VM...')
+    vm.launch()
+
+    iotests.log('Starting stream jobs...')
+    iotests.log(vm.qmp('block-stream', device='disk0', job_id='job0'))
+    iotests.log(vm.qmp('block-stream', device='disk1', job_id='job1'))
+
+    finished = 0
+    while True:
+        try:
+            ev = vm.event_wait('JOB_STATUS_CHANGE', timeout=0.1)
+            if ev is not None and ev['data']['status'] == 'null':
+                finished += 1
+                # The test is done once both jobs are gone
+                if finished == 2:
+                    break
+        except TimeoutError:
+            pass
+        vm.cmd('query-jobs')

@@ -0,0 +1,11 @@
+Preparing disks...
+wrote 10485760/10485760 bytes at offset 0
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+wrote 10485760/10485760 bytes at offset 0
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+Launching VM...
+Starting stream jobs...
+{"return": {}}
+{"return": {}}

@@ -809,7 +809,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,

 bdrv_graph_wrlock(target);
 block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(target);

 switch (result) {
 case TEST_JOB_SUCCESS:

@@ -995,7 +995,7 @@ static void bdrv_test_top_close(BlockDriverState *bs)
 QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
 bdrv_unref_child(bs, c);
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 }

 static int coroutine_fn GRAPH_RDLOCK

@@ -1088,7 +1088,7 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
 bdrv_graph_wrlock(NULL);
 bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
 BDRV_CHILD_DATA, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 /* This child will be the one to pass to requests through to, and
 * it will stall until a drain occurs */

@@ -1101,7 +1101,7 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
 &child_of_bds,
 BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
 &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 /* This child is just there to be deleted
 * (for detach_instead_of_delete == true) */

@@ -1110,7 +1110,7 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
 bdrv_graph_wrlock(NULL);
 bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
 &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
 blk_insert_bs(blk, bs, &error_abort);

@@ -1200,7 +1200,7 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque)
 data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
 &child_of_bds, BDRV_CHILD_DATA,
 &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 }

 static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)

@@ -1308,7 +1308,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
 bdrv_attach_child(parent_a, a, "PA-A",
 by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
 BDRV_CHILD_DATA, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 g_assert_cmpint(parent_a->refcnt, ==, 1);
 g_assert_cmpint(parent_b->refcnt, ==, 1);

@@ -1735,7 +1735,7 @@ static void test_drop_intermediate_poll(void)
 &chain_child_class, BDRV_CHILD_COW, &error_abort);
 }
 }
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);

@@ -1985,7 +1985,7 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
 bdrv_graph_wrlock(NULL);
 bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
 BDRV_CHILD_COW, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 parent_s->setup_completed = true;

 for (i = 0; i < old_drain_count; i++) {

@@ -2018,7 +2018,7 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
 bdrv_drained_begin(new_child_bs);
 bdrv_graph_wrlock(NULL);
 bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 bdrv_drained_end(new_child_bs);
 bdrv_drained_end(old_child_bs);
 g_assert(parent_bs->quiesce_counter == new_drain_count);

@@ -140,7 +140,7 @@ static void test_update_perm_tree(void)
 bdrv_graph_wrlock(NULL);
 bdrv_attach_child(filter, bs, "child", &child_of_bds,
 BDRV_CHILD_DATA, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 aio_context_acquire(qemu_get_aio_context());
 ret = bdrv_append(filter, bs, NULL);

@@ -210,7 +210,7 @@ static void test_should_update_child(void)
 g_assert(target->backing->bs == bs);
 bdrv_attach_child(filter, target, "target", &child_of_bds,
 BDRV_CHILD_DATA, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);
 aio_context_acquire(qemu_get_aio_context());
 bdrv_append(filter, bs, &error_abort);
 aio_context_release(qemu_get_aio_context());

@@ -260,7 +260,7 @@ static void test_parallel_exclusive_write(void)
 &error_abort);

 bdrv_replace_node(fl1, fl2, &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 bdrv_drained_end(fl2);
 bdrv_drained_end(fl1);

@@ -380,7 +380,7 @@ static void test_parallel_perm_update(void)
 bdrv_attach_child(fl2, base, "backing", &child_of_bds,
 BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
 &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 /* Select fl1 as first child to be active */
 s->selected = c_fl1;

@@ -438,7 +438,7 @@ static void test_append_greedy_filter(void)
 bdrv_attach_child(top, base, "backing", &child_of_bds,
 BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
 &error_abort);
-bdrv_graph_wrunlock();
+bdrv_graph_wrunlock(NULL);

 aio_context_acquire(qemu_get_aio_context());
 bdrv_append(fl, base, &error_abort);