mirror of https://github.com/xemu-project/xemu.git
Migration pull 2019-05-14
Small fixes/cleanups
One HMP/monitor fix

-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEERfXHG0oMt/uXep+pBRYzHrxb/ecFAlzbAwkACgkQBRYzHrxb
/ecqxA/+Khrvn4mxmFaMb9lLh8SS765we09T5mGPOdWFOrq10bg1d6VX+VEEAH53
pCjpC9ap78iE/A8KGu6yCaULt5cCZAEm2EWvsN3JIZpzi/8xDxn5ebR9+ak2MZ/C
7Xx4U0WAKbzVIoJL+e+z22FcR/KNwNOK8SvYOdzUNgfymjHy+SQUYWIxh1l6ltqs
NVbAX2VsTTLIJY8EU9u8TCtCKPqPOqtU0fTB9eeRPJ+MvYV8SNp7pIBX/57yLcuK
gTG9m3JkvSd/QZqKVVUf+a1ZIrq17pOLyF7rb8XcGmuVwd+8NJKIjmggzHbHPd3y
pxQV1QjwueO263ElhHa8dOd6rJ62wW0fKj9R4KahD562bh5majHLTqc41oVhnpNI
V+xavmzMGbgoP8ipUfJesNcn0qO+NYwpLqoUV0qxYdXJG5oHCEA4o0RdwOhHEQ3I
MlBaTGl2Hrx3jqHdOhEzfejKpEVgje6FRrkcAvwl3GUbHB9y/RlpQPZEOFs6Qk14
cYt5HwV+MJHREjUY/+nEJ9tmM28H0PNA/i4ZIPrP6PA/DySntJTYJ1vfrSi3zdLf
McYial2g5hnesY/WHZpRUzyf5s90rCzt6k7F6R8/3IFc3LotrJVPEl2BOgEtHCNA
cllC6yOKGfU2dgsQBa12jm5rn9nEb0zTnsOXXoldfgyYTl0ckF4=
=nk59
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20190514b' into staging

Migration pull 2019-05-14

Small fixes/cleanups
One HMP/monitor fix

# gpg: Signature made Tue 14 May 2019 19:03:53 BST
# gpg: using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20190514b:
  monitor: Call mon_get_cpu() only once at hmp_gva2gpa()
  migration/ram.c: fix typos in comments
  migration: Fix use-after-free during process exit
  migration/savevm: wrap into qemu_loadvm_state_header()
  migration/savevm: load_header before load_setup
  migration/savevm: remove duplicate check of migration_is_blocked
  migration: update comments of migration bitmap
  migration/ram.c: start of migration_bitmap_sync_range is always 0
  qemu-option.hx: Update missed parameter for colo-compare
  migration/colo.h: Remove obsolete codes
  migration/colo.c: Remove redundant input parameter
  migration: savevm: fix error code with migration blockers
  vmstate: check subsection_found is enough
  migration: remove not used field xfer_limit
  migration: not necessary to check ops again
  migration: comment VMSTATE_UNUSED*() properly

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit c1497fba36
@@ -22,8 +22,6 @@ enum colo_event {
     COLO_EVENT_FAILOVER,
 };
 
-void colo_info_init(void);
-
 void migrate_start_colo_process(MigrationState *s);
 bool migration_in_colo_state(void);
 
@@ -37,7 +35,7 @@ bool migration_incoming_in_colo_state(void);
 COLOMode get_colo_mode(void);
 
 /* failover */
-void colo_do_failover(MigrationState *s);
+void colo_do_failover(void);
 
 void colo_checkpoint_notify(void *opaque);
 #endif
@@ -1035,6 +1035,20 @@ extern const VMStateInfo vmstate_info_qtailq;
 #define VMSTATE_BUFFER_UNSAFE(_field, _state, _version, _size)        \
     VMSTATE_BUFFER_UNSAFE_INFO(_field, _state, _version, vmstate_info_buffer, _size)
 
+/*
+ * These VMSTATE_UNUSED*() macros can be used to fill in the holes
+ * when some of the vmstate fields are obsolete to be compatible with
+ * migrations between new/old binaries.
+ *
+ * CAUTION: when using any of the VMSTATE_UNUSED*() macros please be
+ * sure that the size passed in is the size that was actually *sent*
+ * rather than the size of the *structure*. One example is the
+ * boolean type - the size of the structure can vary depending on the
+ * definition of boolean, however the size we actually sent is always
+ * 1 byte (please refer to implementation of VMSTATE_BOOL_V and
+ * vmstate_info_bool). So here we should always pass in size==1
+ * rather than size==sizeof(bool).
+ */
 #define VMSTATE_UNUSED_V(_v, _size)                                   \
     VMSTATE_UNUSED_BUFFER(NULL, _v, _size)
 
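The caution in the new comment is easiest to see against a concrete field list. Below is a minimal sketch, not part of this pull: FooState, its irq_count member and the dropped 'enabled' bool are hypothetical, but the macros are the real vmstate.h ones. The obsolete bool is papered over with VMSTATE_UNUSED(1), because vmstate_info_bool puts exactly one byte on the wire regardless of sizeof(bool):

#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct FooState {
    uint32_t irq_count;
    /* bool enabled; -- dropped, but old migration streams still carry it */
} FooState;

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(irq_count, FooState),
        /* Hole left by the old 'enabled' bool: 1 byte was sent on the
         * wire (see vmstate_info_bool), so the size here is 1, never
         * sizeof(bool). */
        VMSTATE_UNUSED(1),
        VMSTATE_END_OF_LIST()
    }
};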
@@ -39,7 +39,7 @@ static void colo_failover_bh(void *opaque)
         return;
     }
 
-    colo_do_failover(NULL);
+    colo_do_failover();
 }
 
 void failover_request_active(Error **errp)
@@ -193,7 +193,7 @@ COLOMode get_colo_mode(void)
     }
 }
 
-void colo_do_failover(MigrationState *s)
+void colo_do_failover(void)
 {
     /* Make sure VM stopped while failover happened. */
     if (!colo_runstate_is_stopped()) {
@@ -1495,10 +1495,8 @@ static void block_cleanup_parameters(MigrationState *s)
     }
 }
 
-static void migrate_fd_cleanup(void *opaque)
+static void migrate_fd_cleanup(MigrationState *s)
 {
-    MigrationState *s = opaque;
-
     qemu_bh_delete(s->cleanup_bh);
     s->cleanup_bh = NULL;
 
@@ -1543,6 +1541,23 @@ static void migrate_fd_cleanup(void *opaque)
     block_cleanup_parameters(s);
 }
 
+static void migrate_fd_cleanup_schedule(MigrationState *s)
+{
+    /*
+     * Ref the state for bh, because it may be called when
+     * there're already no other refs
+     */
+    object_ref(OBJECT(s));
+    qemu_bh_schedule(s->cleanup_bh);
+}
+
+static void migrate_fd_cleanup_bh(void *opaque)
+{
+    MigrationState *s = opaque;
+    migrate_fd_cleanup(s);
+    object_unref(OBJECT(s));
+}
+
 void migrate_set_error(MigrationState *s, const Error *error)
 {
     qemu_mutex_lock(&s->error_mutex);
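The two helpers added above are the core of the "Fix use-after-free during process exit" change: the cleanup bottom half can fire after the caller has dropped its last reference to the MigrationState, so a reference is taken when the BH is scheduled and released only once it has run. A distilled sketch of the same pattern, assuming a hypothetical MyDeviceState QOM object rather than the real migration code:

/*
 * Hypothetical device: my_do_cleanup() and the cleanup_bh member stand in
 * for migrate_fd_cleanup() and MigrationState::cleanup_bh.
 */
static void my_cleanup_bh(void *opaque)
{
    MyDeviceState *s = opaque;

    my_do_cleanup(s);           /* still safe: the ref below keeps s alive */
    object_unref(OBJECT(s));    /* drop the reference taken at schedule time */
}

static void my_schedule_cleanup(MyDeviceState *s)
{
    /*
     * Without this ref the main loop could run the BH after the last
     * other unref, handing my_cleanup_bh() a dangling pointer.
     */
    object_ref(OBJECT(s));
    qemu_bh_schedule(s->cleanup_bh);
}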
@@ -1681,7 +1696,6 @@ void migrate_init(MigrationState *s)
      * locks.
      */
     s->bytes_xfer = 0;
-    s->xfer_limit = 0;
     s->cleanup_bh = 0;
     s->to_dst_file = NULL;
     s->state = MIGRATION_STATUS_NONE;
@@ -3144,7 +3158,7 @@ static void migration_iteration_finish(MigrationState *s)
         error_report("%s: Unknown ending state %d", __func__, s->state);
         break;
     }
-    qemu_bh_schedule(s->cleanup_bh);
+    migrate_fd_cleanup_schedule(s);
    qemu_mutex_unlock_iothread();
 }
 
@@ -3279,7 +3293,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
 
     s->expected_downtime = s->parameters.downtime_limit;
-    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
+    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
     if (error_in) {
         migrate_fd_error(s, error_in);
         migrate_fd_cleanup(s);
@@ -117,7 +117,6 @@ struct MigrationState
 
     /*< public >*/
     size_t bytes_xfer;
-    size_t xfer_limit;
     QemuThread thread;
     QEMUBH *cleanup_bh;
     QEMUFile *to_dst_file;
@@ -917,7 +917,7 @@ struct {
  * - to make easier to know what to free at the end of migration
  *
  * This way we always know who is the owner of each "pages" struct,
- * and we don't need any loocking. It belongs to the migration thread
+ * and we don't need any locking. It belongs to the migration thread
  * or to the channel thread. Switching is safe because the migration
  * thread is using the channel mutex when changing it, and the channel
 * have to had finish with its own, otherwise pending_job can't be
@@ -1630,9 +1630,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
 /**
  * migration_bitmap_find_dirty: find the next dirty page from start
  *
- * Called with rcu_read_lock() to protect migration_bitmap
- *
- * Returns the byte offset within memory region of the start of a dirty page
+ * Returns the page offset within memory region of the start of a dirty page
  *
  * @rs: current RAM state
  * @rb: RAMBlock where to search for dirty pages
@@ -1681,10 +1679,10 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 }
 
 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
-                                        ram_addr_t start, ram_addr_t length)
+                                        ram_addr_t length)
 {
     rs->migration_dirty_pages +=
-            cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
+            cpu_physical_memory_sync_dirty_bitmap(rb, 0, length,
                                                   &rs->num_dirty_pages_period);
 }
 
@@ -1773,7 +1771,7 @@ static void migration_bitmap_sync(RAMState *rs)
     qemu_mutex_lock(&rs->bitmap_mutex);
     rcu_read_lock();
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        migration_bitmap_sync_range(rs, block, 0, block->used_length);
+        migration_bitmap_sync_range(rs, block, block->used_length);
     }
     ram_counters.remaining = ram_bytes_remaining();
     rcu_read_unlock();
@@ -2146,7 +2144,7 @@ retry:
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
-* Returns if a page is found
+* Returns true if a page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
@@ -2242,7 +2240,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
 *
 * Skips pages that are already sent (!dirty)
 *
-* Returns if a queued page is found
+* Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
@@ -2681,7 +2679,7 @@ static void ram_save_cleanup(void *opaque)
     RAMBlock *block;
 
     /* caller have hold iothread lock or is in a bh, so there is
-     * no writing race against this migration_bitmap
+     * no writing race against the migration bitmap
      */
     memory_global_dirty_log_stop();
 
@@ -3449,7 +3447,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.
-           qemu_get_clock_ns() is a bit expensive, so we only check each some
+           qemu_clock_get_ns() is a bit expensive, so we only check each some
            iterations
         */
         if ((i & 63) == 0) {
@@ -4196,7 +4194,7 @@ static void colo_flush_ram_cache(void)
     memory_global_dirty_log_sync();
     rcu_read_lock();
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
+        migration_bitmap_sync_range(ram_state, block, block->used_length);
     }
     rcu_read_unlock();
 
@@ -1157,15 +1157,13 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
         if (!se->ops || !se->ops->save_live_iterate) {
             continue;
         }
-        if (se->ops && se->ops->is_active) {
-            if (!se->ops->is_active(se->opaque)) {
-                continue;
-            }
+        if (se->ops->is_active &&
+            !se->ops->is_active(se->opaque)) {
+            continue;
         }
-        if (se->ops && se->ops->is_active_iterate) {
-            if (!se->ops->is_active_iterate(se->opaque)) {
-                continue;
-            }
+        if (se->ops->is_active_iterate &&
+            !se->ops->is_active_iterate(se->opaque)) {
+            continue;
         }
         /*
          * In the postcopy phase, any device that doesn't know how to
@@ -1420,10 +1418,6 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
         return -EINVAL;
     }
 
-    if (migration_is_blocked(errp)) {
-        return -EINVAL;
-    }
-
     if (migrate_use_block()) {
         error_setg(errp, "Block migration and snapshots are incompatible");
         return -EINVAL;
@@ -2268,6 +2262,43 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
     return 0;
 }
 
+static int qemu_loadvm_state_header(QEMUFile *f)
+{
+    unsigned int v;
+    int ret;
+
+    v = qemu_get_be32(f);
+    if (v != QEMU_VM_FILE_MAGIC) {
+        error_report("Not a migration stream");
+        return -EINVAL;
+    }
+
+    v = qemu_get_be32(f);
+    if (v == QEMU_VM_FILE_VERSION_COMPAT) {
+        error_report("SaveVM v2 format is obsolete and don't work anymore");
+        return -ENOTSUP;
+    }
+    if (v != QEMU_VM_FILE_VERSION) {
+        error_report("Unsupported migration stream version");
+        return -ENOTSUP;
+    }
+
+    if (migrate_get_current()->send_configuration) {
+        if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
+            error_report("Configuration section missing");
+            qemu_loadvm_state_cleanup();
+            return -EINVAL;
+        }
+        ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
+
+        if (ret) {
+            qemu_loadvm_state_cleanup();
+            return ret;
+        }
+    }
+    return 0;
+}
+
 static int qemu_loadvm_state_setup(QEMUFile *f)
 {
     SaveStateEntry *se;
@@ -2416,7 +2447,6 @@ int qemu_loadvm_state(QEMUFile *f)
 {
     MigrationIncomingState *mis = migration_incoming_get_current();
     Error *local_err = NULL;
-    unsigned int v;
     int ret;
 
     if (qemu_savevm_state_blocked(&local_err)) {
@@ -2424,40 +2454,15 @@ int qemu_loadvm_state(QEMUFile *f)
         return -EINVAL;
     }
 
-    v = qemu_get_be32(f);
-    if (v != QEMU_VM_FILE_MAGIC) {
-        error_report("Not a migration stream");
-        return -EINVAL;
-    }
-
-    v = qemu_get_be32(f);
-    if (v == QEMU_VM_FILE_VERSION_COMPAT) {
-        error_report("SaveVM v2 format is obsolete and don't work anymore");
-        return -ENOTSUP;
-    }
-    if (v != QEMU_VM_FILE_VERSION) {
-        error_report("Unsupported migration stream version");
-        return -ENOTSUP;
+    ret = qemu_loadvm_state_header(f);
+    if (ret) {
+        return ret;
     }
 
     if (qemu_loadvm_state_setup(f) != 0) {
         return -EINVAL;
     }
 
-    if (migrate_get_current()->send_configuration) {
-        if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
-            error_report("Configuration section missing");
-            qemu_loadvm_state_cleanup();
-            return -EINVAL;
-        }
-        ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
-
-        if (ret) {
-            qemu_loadvm_state_cleanup();
-            return ret;
-        }
-    }
-
     cpu_synchronize_all_pre_loadvm();
 
     ret = qemu_loadvm_state_main(f, mis);
@@ -2544,7 +2549,7 @@ int save_snapshot(const char *name, Error **errp)
     AioContext *aio_context;
 
     if (migration_is_blocked(errp)) {
-        return false;
+        return ret;
     }
 
     if (!replay_can_snapshot()) {
@@ -496,7 +496,7 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
                                    void *opaque, QJSON *vmdesc)
 {
     const VMStateDescription **sub = vmsd->subsections;
-    bool subsection_found = false;
+    bool vmdesc_has_subsections = false;
     int ret = 0;
 
     trace_vmstate_subsection_save_top(vmsd->name);
@@ -508,9 +508,9 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
             trace_vmstate_subsection_save_loop(vmsd->name, vmsdsub->name);
             if (vmdesc) {
                 /* Only create subsection array when we have any */
-                if (!subsection_found) {
+                if (!vmdesc_has_subsections) {
                     json_start_array(vmdesc, "subsections");
-                    subsection_found = true;
+                    vmdesc_has_subsections = true;
                 }
 
                 json_start_object(vmdesc, NULL);
@@ -533,7 +533,7 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
         sub++;
     }
 
-    if (vmdesc && subsection_found) {
+    if (vmdesc_has_subsections) {
         json_end_array(vmdesc);
     }
 
@@ -1685,8 +1685,7 @@ static void hmp_gva2gpa(Monitor *mon, const QDict *qdict)
         return;
     }
 
-    gpa = cpu_get_phys_page_attrs_debug(mon_get_cpu(),
-                                        addr & TARGET_PAGE_MASK, &attrs);
+    gpa = cpu_get_phys_page_attrs_debug(cs, addr & TARGET_PAGE_MASK, &attrs);
     if (gpa == -1) {
         monitor_printf(mon, "Unmapped\n");
     } else {
@@ -4425,13 +4425,15 @@ Dump the network traffic on netdev @var{dev} to the file specified by
 The file format is libpcap, so it can be analyzed with tools such as tcpdump
 or Wireshark.
 
-@item -object colo-compare,id=@var{id},primary_in=@var{chardevid},secondary_in=@var{chardevid},outdev=@var{chardevid}[,vnet_hdr_support]
+@item -object colo-compare,id=@var{id},primary_in=@var{chardevid},secondary_in=@var{chardevid},outdev=@var{chardevid},iothread=@var{id}[,vnet_hdr_support]
 
 Colo-compare gets packet from primary_in@var{chardevid} and secondary_in@var{chardevid}, than compare primary packet with
 secondary packet. If the packets are same, we will output primary
 packet to outdev@var{chardevid}, else we will notify colo-frame
 do checkpoint and send primary packet to outdev@var{chardevid}.
-if it has the vnet_hdr_support flag, colo compare will send/recv packet with vnet_hdr_len.
+In order to improve efficiency, we need to put the task of comparison
+in another thread. If it has the vnet_hdr_support flag, colo compare
+will send/recv packet with vnet_hdr_len.
 
 we must use it with the help of filter-mirror and filter-redirector.
@@ -4446,10 +4448,11 @@ primary:
 -chardev socket,id=compare0-0,host=3.3.3.3,port=9001
 -chardev socket,id=compare_out,host=3.3.3.3,port=9005,server,nowait
 -chardev socket,id=compare_out0,host=3.3.3.3,port=9005
+-object iothread,id=iothread1
 -object filter-mirror,id=m0,netdev=hn0,queue=tx,outdev=mirror0
 -object filter-redirector,netdev=hn0,id=redire0,queue=rx,indev=compare_out
 -object filter-redirector,netdev=hn0,id=redire1,queue=rx,outdev=compare0
--object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0
+-object colo-compare,id=comp0,primary_in=compare0-0,secondary_in=compare1,outdev=compare_out0,iothread=iothread1
 
 secondary:
 -netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown