migration: add has_postcopy savevm handler
Currently a savevm state is recognized as postcopy-able by its save_live_complete_postcopy handler being non-NULL. This is inconvenient once there are several different postcopy-able states: RAM postcopy may be disabled while some other postcopy feature is enabled, and in that case the RAM state should behave as if it were not postcopy-able. This patch adds a separate has_postcopy handler to specify the behaviour of a savevm state.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
parent d7788151a0
commit c646762736
@@ -24,6 +24,7 @@ typedef struct SaveVMHandlers {
 
     /* This runs both outside and inside the iothread lock. */
     bool (*is_active)(void *opaque);
+    bool (*has_postcopy)(void *opaque);
 
     /* This runs outside the iothread lock in the migration case, and
      * within the lock in the savevm case. The callback had better only
@@ -2849,11 +2849,17 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     return ret;
 }
 
+static bool ram_has_postcopy(void *opaque)
+{
+    return migrate_postcopy_ram();
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
     .save_live_complete_postcopy = ram_save_complete,
     .save_live_complete_precopy = ram_save_complete,
+    .has_postcopy = ram_has_postcopy,
     .save_live_pending = ram_save_pending,
     .load_state = ram_load,
     .save_cleanup = ram_save_cleanup,
@@ -1008,7 +1008,8 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
          * call that's already run, it might get confused if we call
          * iterate afterwards.
          */
-        if (postcopy && !se->ops->save_live_complete_postcopy) {
+        if (postcopy &&
+            !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) {
             continue;
         }
         if (qemu_file_rate_limit(f)) {
@ -1097,7 +1098,8 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
|
||||||
|
|
||||||
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
|
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
|
||||||
if (!se->ops ||
|
if (!se->ops ||
|
||||||
(in_postcopy && se->ops->save_live_complete_postcopy) ||
|
(in_postcopy && se->ops->has_postcopy &&
|
||||||
|
se->ops->has_postcopy(se->opaque)) ||
|
||||||
(in_postcopy && !iterable_only) ||
|
(in_postcopy && !iterable_only) ||
|
||||||
!se->ops->save_live_complete_precopy) {
|
!se->ops->save_live_complete_precopy) {
|
||||||
continue;
|
continue;
|
||||||
|