mirror of https://github.com/xemu-project/xemu.git
block: New BdrvChildRole.activate() for blk_resume_after_migration()
Instead of manually calling blk_resume_after_migration() in migration code after doing bdrv_invalidate_cache_all(), integrate the BlockBackend activation with cache invalidation into a single function. This is achieved with a new callback in BdrvChildRole that is called by bdrv_invalidate_cache_all().

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
This commit is contained in:
parent ace21a5875
commit 4417ab7adf
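Before the per-file diffs, here is a minimal, self-contained C sketch of the pattern this commit introduces: a block node keeps a list of parent edges, each edge carries a role with an optional activate() callback, and cache invalidation walks the parents and stops at the first activation error. All names in the sketch (ChildEdge, ChildRole, Backend, node_invalidate_cache) are simplified stand-ins for illustration, not the actual QEMU types.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for QEMU's BdrvChild and BdrvChildRole. */
typedef struct ChildEdge ChildEdge;

typedef struct ChildRole {
    /* Optional: tells the parent that the child is usable again. */
    bool (*activate)(ChildEdge *child, const char **errp);
} ChildRole;

struct ChildEdge {
    const ChildRole *role;
    void *opaque;             /* the parent object, e.g. a BlockBackend */
    ChildEdge *next_parent;   /* next edge in the node's parent list */
};

/* Parent stand-in that re-enables its permissions on activation. */
typedef struct Backend {
    bool perm_disabled;
} Backend;

static bool backend_activate(ChildEdge *child, const char **errp)
{
    Backend *blk = child->opaque;

    if (!blk->perm_disabled) {
        return true;                  /* nothing to do */
    }
    blk->perm_disabled = false;       /* take the permissions back */
    /* A real implementation would request permissions here and set *errp
     * (and restore perm_disabled) if that fails. */
    (void)errp;
    return true;
}

static const ChildRole backend_role = { .activate = backend_activate };

/* Analogue of bdrv_invalidate_cache(): after the node itself is refreshed,
 * walk the parent edges and run each optional activate() callback,
 * propagating the first error to the caller. */
static bool node_invalidate_cache(ChildEdge *parents, const char **errp)
{
    for (ChildEdge *p = parents; p; p = p->next_parent) {
        if (p->role->activate && !p->role->activate(p, errp)) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    Backend blk = { .perm_disabled = true };
    ChildEdge edge = { .role = &backend_role, .opaque = &blk };
    const char *err = NULL;

    if (!node_invalidate_cache(&edge, &err)) {
        fprintf(stderr, "activation failed: %s\n", err);
        return 1;
    }
    printf("backend permissions re-enabled: %s\n",
           blk.perm_disabled ? "no" : "yes");
    return 0;
}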
block.c (12 lines changed)

@@ -3949,7 +3949,7 @@ void bdrv_init_with_whitelist(void)
 
 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
 {
-    BdrvChild *child;
+    BdrvChild *child, *parent;
     Error *local_err = NULL;
     int ret;
 
@@ -3985,6 +3985,16 @@ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
         error_setg_errno(errp, -ret, "Could not refresh total sector count");
         return;
     }
+
+    QLIST_FOREACH(parent, &bs->parents, next_parent) {
+        if (parent->role->activate) {
+            parent->role->activate(parent, &local_err);
+            if (local_err) {
+                error_propagate(errp, local_err);
+                return;
+            }
+        }
+    }
 }
 
 void bdrv_invalidate_cache_all(Error **errp)
block/block-backend.c

@@ -130,6 +130,32 @@ static const char *blk_root_get_name(BdrvChild *child)
     return blk_name(child->opaque);
 }
 
+/*
+ * Notifies the user of the BlockBackend that migration has completed. qdev
+ * devices can tighten their permissions in response (specifically revoke
+ * shared write permissions that we needed for storage migration).
+ *
+ * If an error is returned, the VM cannot be allowed to be resumed.
+ */
+static void blk_root_activate(BdrvChild *child, Error **errp)
+{
+    BlockBackend *blk = child->opaque;
+    Error *local_err = NULL;
+
+    if (!blk->disable_perm) {
+        return;
+    }
+
+    blk->disable_perm = false;
+
+    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        blk->disable_perm = true;
+        return;
+    }
+}
+
 static const BdrvChildRole child_root = {
     .inherit_options = blk_root_inherit_options,
 
@@ -140,6 +166,8 @@ static const BdrvChildRole child_root = {
 
     .drained_begin = blk_root_drained_begin,
     .drained_end = blk_root_drained_end,
+
+    .activate = blk_root_activate,
 };
 
 /*
@@ -601,34 +629,6 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
     *shared_perm = blk->shared_perm;
 }
 
-/*
- * Notifies the user of all BlockBackends that migration has completed. qdev
- * devices can tighten their permissions in response (specifically revoke
- * shared write permissions that we needed for storage migration).
- *
- * If an error is returned, the VM cannot be allowed to be resumed.
- */
-void blk_resume_after_migration(Error **errp)
-{
-    BlockBackend *blk;
-    Error *local_err = NULL;
-
-    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
-        if (!blk->disable_perm) {
-            continue;
-        }
-
-        blk->disable_perm = false;
-
-        blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
-        if (local_err) {
-            error_propagate(errp, local_err);
-            blk->disable_perm = true;
-            return;
-        }
-    }
-}
-
 static int blk_do_attach_dev(BlockBackend *blk, void *dev)
 {
     if (blk->dev) {
include/block/block.h

@@ -369,8 +369,6 @@ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
 void bdrv_invalidate_cache_all(Error **errp);
 int bdrv_inactivate_all(void);
 
-void blk_resume_after_migration(Error **errp);
-
 /* Ensure contents are flushed to disk. */
 int bdrv_flush(BlockDriverState *bs);
 int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
include/block/block_int.h

@@ -473,6 +473,11 @@ struct BdrvChildRole {
     void (*drained_begin)(BdrvChild *child);
     void (*drained_end)(BdrvChild *child);
 
+    /* Notifies the parent that the child has been activated (e.g. when
+     * migration is completing) and it can start requesting permissions and
+     * doing I/O on it. */
+    void (*activate)(BdrvChild *child, Error **errp);
+
     void (*attach)(BdrvChild *child);
     void (*detach)(BdrvChild *child);
 };
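As a further illustration of the new callback (not part of this commit), the fragment below sketches what another parent's .activate() implementation and role registration could look like. The ExampleParent type, its child_active field and the function names are invented for illustration; BdrvChild, BdrvChildRole and Error are the real types from the headers above.

#include "qemu/osdep.h"
#include "block/block_int.h"

/* Hypothetical parent that merely tracks whether its child is usable. */
typedef struct ExampleParent {
    bool child_active;
} ExampleParent;

static void example_parent_activate(BdrvChild *child, Error **errp)
{
    ExampleParent *p = child->opaque;

    if (p->child_active) {
        return;                 /* already active, nothing to do */
    }
    p->child_active = true;
    /* A real parent would re-request its permissions here and report any
     * failure through errp, rolling child_active back on error. */
    (void)errp;
}

static const BdrvChildRole example_parent_role = {
    /* ... other callbacks as needed ... */
    .activate = example_parent_activate,
};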
migration/migration.c

@@ -341,9 +341,6 @@ static void process_incoming_migration_bh(void *opaque)
     /* Make sure all file formats flush their mutable metadata.
      * If we get an error here, just don't restart the VM yet. */
     bdrv_invalidate_cache_all(&local_err);
-    if (!local_err) {
-        blk_resume_after_migration(&local_err);
-    }
     if (local_err) {
         error_report_err(local_err);
         local_err = NULL;
migration/savevm.c

@@ -1615,9 +1615,6 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
     /* Make sure all file formats flush their mutable metadata.
      * If we get an error here, just don't restart the VM yet. */
     bdrv_invalidate_cache_all(&local_err);
-    if (!local_err) {
-        blk_resume_after_migration(&local_err);
-    }
     if (local_err) {
         error_report_err(local_err);
         local_err = NULL;
qmp.c (6 lines changed)

@@ -207,12 +207,6 @@ void qmp_cont(Error **errp)
         return;
     }
 
-    blk_resume_after_migration(&local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        return;
-    }
-
     if (runstate_check(RUN_STATE_INMIGRATE)) {
         autostart = 1;
     } else {
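To summarize the caller-side effect shown in the migration and qmp.c hunks above, here is a hedged, condensed sketch of the flow those callers are left with (the wrapper function name is invented; bdrv_invalidate_cache_all, error_report_err and vm_start are the real functions, and the real callers keep their own conditions around starting the VM):

/* After this commit, resuming after migration only needs the single call;
 * BlockBackend activation happens inside bdrv_invalidate_cache_all() via
 * the parents' .activate() callbacks. */
static void resume_after_migration_example(void)
{
    Error *local_err = NULL;

    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        return;             /* don't start the VM if activation failed */
    }
    vm_start();
}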