Merge remote-tracking branch 'quintela/migration-anthony-v2' into staging
* quintela/migration-anthony-v2:
  Maintain the number of dirty pages
  dirty bitmap: abstract its use
  Exit loop if we have been there too long
  Only calculate expected_time for stage 2
  Only TCG needs TLB handling
  No need to iterate if we already are over the limit
  Add tracepoints for savevm section start/end
  Add spent time for migration
  Add migration_end function
  Add debugging infrastructure
  Add save_block_hdr function
  Add MigrationParams structure
  Add missing check for host_from_stream_offset return value for RAM_SAVE_FLAG_PAGE
commit 3f6e9a5fad
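
As background for the diffs below: the MigrationParams work replaces the blk_enable/shared integer pair threaded through the savevm layer with a single struct. Here is a minimal, hedged sketch of the new calling convention as standalone C, not QEMU code; only the two-field struct shape is taken from the migration.h hunk below, every other name is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the struct added to migration.h in this merge. */
    struct MigrationParams {
        bool blk;      /* migrate block devices too            */
        bool shared;   /* only migrate the non-shared overlays */
    };

    /* Illustrative stand-in for a SaveSetParamsHandler such as
     * block_set_params(): the handler now receives one pointer
     * instead of two ints. */
    static void example_set_params(const struct MigrationParams *params,
                                   void *opaque)
    {
        bool blk_enable = params->blk;

        /* shared base implies block migration is enabled */
        blk_enable |= params->shared;

        printf("blk_enable=%d shared_base=%d\n", blk_enable, params->shared);
        (void)opaque;
    }

    int main(void)
    {
        struct MigrationParams params = { .blk = false, .shared = true };

        example_set_params(&params, NULL);
        return 0;
    }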
arch_init.c (111 lines changed)
@@ -44,6 +44,14 @@
 #include "exec-memory.h"
 #include "hw/pcspk.h"
 
+#ifdef DEBUG_ARCH_INIT
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
 #ifdef TARGET_SPARC
 int graphic_width = 1024;
 int graphic_height = 768;
@@ -161,6 +169,18 @@ static int is_dup_page(uint8_t *page)
     return 1;
 }
 
+static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
+                           int cont, int flag)
+{
+    qemu_put_be64(f, offset | cont | flag);
+    if (!cont) {
+        qemu_put_byte(f, strlen(block->idstr));
+        qemu_put_buffer(f, (uint8_t *)block->idstr,
+                        strlen(block->idstr));
+    }
+
+}
+
 static RAMBlock *last_block;
 static ram_addr_t last_offset;
 
@@ -187,21 +207,11 @@ static int ram_save_block(QEMUFile *f)
             p = memory_region_get_ram_ptr(mr) + offset;
 
             if (is_dup_page(p)) {
-                qemu_put_be64(f, offset | cont | RAM_SAVE_FLAG_COMPRESS);
-                if (!cont) {
-                    qemu_put_byte(f, strlen(block->idstr));
-                    qemu_put_buffer(f, (uint8_t *)block->idstr,
-                                    strlen(block->idstr));
-                }
+                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
                 qemu_put_byte(f, *p);
                 bytes_sent = 1;
             } else {
-                qemu_put_be64(f, offset | cont | RAM_SAVE_FLAG_PAGE);
-                if (!cont) {
-                    qemu_put_byte(f, strlen(block->idstr));
-                    qemu_put_buffer(f, (uint8_t *)block->idstr,
-                                    strlen(block->idstr));
-                }
+                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                 qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                 bytes_sent = TARGET_PAGE_SIZE;
             }
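
The save_block_hdr() helper above factors out the page-header encoding that ram_save_block() previously open-coded twice: a big-endian 64-bit word carrying the page offset OR'd with a continuation bit and a type flag, followed by the block id string (length byte plus bytes) only when the continuation bit is clear. A hedged sketch of that layout using illustrative flag values; the real RAM_SAVE_FLAG_* constants live in arch_init.c and are not reproduced here:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative values only; QEMU's real RAM_SAVE_FLAG_* differ. */
    #define EX_FLAG_COMPRESS 0x02
    #define EX_FLAG_CONT     0x20

    /* Append one byte to a toy output buffer. */
    static void put_byte(uint8_t *buf, size_t *pos, uint8_t v)
    {
        buf[(*pos)++] = v;
    }

    /* Append a 64-bit value in big-endian order, as qemu_put_be64() would. */
    static void put_be64(uint8_t *buf, size_t *pos, uint64_t v)
    {
        for (int shift = 56; shift >= 0; shift -= 8) {
            put_byte(buf, pos, (uint8_t)(v >> shift));
        }
    }

    /* Same shape as save_block_hdr(): header word, then idstr if !cont. */
    static void example_block_hdr(uint8_t *buf, size_t *pos, const char *idstr,
                                  uint64_t offset, int cont, int flag)
    {
        put_be64(buf, pos, offset | cont | flag);
        if (!cont) {
            put_byte(buf, pos, (uint8_t)strlen(idstr));
            memcpy(buf + *pos, idstr, strlen(idstr));
            *pos += strlen(idstr);
        }
    }

    int main(void)
    {
        uint8_t buf[64];
        size_t pos = 0;

        example_block_hdr(buf, &pos, "pc.ram", 0x1000, 0, EX_FLAG_COMPRESS);
        printf("encoded %zu header bytes\n", pos);
        return 0;
    }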
@@ -228,20 +238,7 @@ static uint64_t bytes_transferred;
 
 static ram_addr_t ram_save_remaining(void)
 {
-    RAMBlock *block;
-    ram_addr_t count = 0;
-
-    QLIST_FOREACH(block, &ram_list.blocks, next) {
-        ram_addr_t addr;
-        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
-            if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
-                                        DIRTY_MEMORY_MIGRATION)) {
-                count++;
-            }
-        }
-    }
-
-    return count;
+    return ram_list.dirty_pages;
 }
 
 uint64_t ram_bytes_remaining(void)
@@ -294,16 +291,23 @@ static void sort_ram_list(void)
     g_free(blocks);
 }
 
+static void migration_end(void)
+{
+    memory_global_dirty_log_stop();
+}
+
+#define MAX_WAIT 50 /* ms, half buffered_file limit */
+
 int ram_save_live(QEMUFile *f, int stage, void *opaque)
 {
     ram_addr_t addr;
     uint64_t bytes_transferred_last;
     double bwidth = 0;
-    uint64_t expected_time = 0;
     int ret;
+    int i;
 
     if (stage < 0) {
-        memory_global_dirty_log_stop();
+        migration_end();
         return 0;
     }
 
@@ -340,6 +344,7 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque)
     bytes_transferred_last = bytes_transferred;
     bwidth = qemu_get_clock_ns(rt_clock);
 
+    i = 0;
     while ((ret = qemu_file_rate_limit(f)) == 0) {
         int bytes_sent;
 
@@ -348,6 +353,20 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque)
         if (bytes_sent == 0) { /* no more blocks */
             break;
         }
+        /* we want to check in the 1st loop, just in case it was the 1st time
+           and we had to sync the dirty bitmap.
+           qemu_get_clock_ns() is a bit expensive, so we only check each some
+           iterations
+        */
+        if ((i & 63) == 0) {
+            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
+            if (t1 > MAX_WAIT) {
+                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
+                        t1, i);
+                break;
+            }
+        }
+        i++;
     }
 
     if (ret < 0) {
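
The loop above bounds how long a stage-2 iteration can spin by sampling the clock only every 64 iterations, since reading it on every page would be comparatively expensive. A standalone sketch of the same pattern using POSIX clock_gettime; MAX_WAIT and the 64-iteration stride are taken from the hunk, the work inside the loop is a placeholder:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define MAX_WAIT 50 /* ms, as in the hunk above */

    static uint64_t now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    int main(void)
    {
        uint64_t start = now_ms();
        volatile uint64_t work = 0;
        int i = 0;

        for (;;) {
            work += i;                /* placeholder for sending one page   */
            if ((i & 63) == 0) {      /* only sample the clock now and then */
                uint64_t elapsed = now_ms() - start;
                if (elapsed > MAX_WAIT) {
                    printf("big wait: %llu ms, %d iterations\n",
                           (unsigned long long)elapsed, i);
                    break;
                }
            }
            i++;
        }
        return 0;
    }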
@@ -376,9 +395,16 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque)
 
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
-    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
+    if (stage == 2) {
+        uint64_t expected_time;
+        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
 
-    return (stage == 2) && (expected_time <= migrate_max_downtime());
+        DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
+                expected_time, migrate_max_downtime());
+
+        return expected_time <= migrate_max_downtime();
+    }
+    return 0;
 }
 
 static inline void *host_from_stream_offset(QEMUFile *f,
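
The stage-2 return value compares an estimated completion time against the allowed downtime: bwidth is the throughput measured earlier in ram_save_live() against the rt_clock nanosecond clock, so remaining pages times page size divided by bwidth gives an estimate in the same time unit as migrate_max_downtime(). A hedged numeric sketch of that comparison; all figures here are illustrative, not measurements:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative numbers, not taken from a real migration. */
        const uint64_t page_size       = 4096;        /* TARGET_PAGE_SIZE      */
        const uint64_t dirty_pages     = 25000;       /* ram_save_remaining()  */
        const double   bytes_per_ns    = 0.125;       /* bwidth: ~125 MB/s     */
        const uint64_t max_downtime_ns = 300000000;   /* say, 300 ms allowed   */

        uint64_t expected_ns = (uint64_t)(dirty_pages * page_size / bytes_per_ns);

        printf("expected %llu ns, allowed %llu ns -> %s\n",
               (unsigned long long)expected_ns,
               (unsigned long long)max_downtime_ns,
               expected_ns <= max_downtime_ns ? "converged" : "keep iterating");
        return 0;
    }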
@@ -414,8 +440,11 @@ static inline void *host_from_stream_offset(QEMUFile *f,
 int ram_load(QEMUFile *f, void *opaque, int version_id)
 {
     ram_addr_t addr;
-    int flags;
+    int flags, ret = 0;
     int error;
+    static uint64_t seq_iter;
+
+    seq_iter++;
 
     if (version_id < 4 || version_id > 4) {
         return -EINVAL;
@@ -445,8 +474,10 @@ int ram_load(QEMUFile *f, void *opaque, int version_id)
 
             QLIST_FOREACH(block, &ram_list.blocks, next) {
                 if (!strncmp(id, block->idstr, sizeof(id))) {
-                    if (block->length != length)
-                        return -EINVAL;
+                    if (block->length != length) {
+                        ret = -EINVAL;
+                        goto done;
+                    }
                     break;
                 }
             }
@@ -454,7 +485,8 @@ int ram_load(QEMUFile *f, void *opaque, int version_id)
             if (!block) {
                 fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                         "accept migration\n", id);
-                return -EINVAL;
+                ret = -EINVAL;
+                goto done;
             }
 
             total_ram_bytes -= length;
@@ -483,16 +515,23 @@ int ram_load(QEMUFile *f, void *opaque, int version_id)
             void *host;
 
             host = host_from_stream_offset(f, addr, flags);
+            if (!host) {
+                return -EINVAL;
+            }
 
             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
         }
         error = qemu_file_get_error(f);
         if (error) {
-            return error;
+            ret = error;
+            goto done;
         }
     } while (!(flags & RAM_SAVE_FLAG_EOS));
 
-    return 0;
+done:
+    DPRINTF("Completed load of VM with exit code %d seq iteration %" PRIu64 "\n",
+            ret, seq_iter);
+    return ret;
 }
 
 #ifdef HAS_AUDIO
block-migration.c

@@ -700,13 +700,13 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
     return 0;
 }
 
-static void block_set_params(int blk_enable, int shared_base, void *opaque)
+static void block_set_params(const MigrationParams *params, void *opaque)
 {
-    block_mig_state.blk_enable = blk_enable;
-    block_mig_state.shared_base = shared_base;
+    block_mig_state.blk_enable = params->blk;
+    block_mig_state.shared_base = params->shared;
 
     /* shared base means that blk_enable = 1 */
-    block_mig_state.blk_enable |= shared_base;
+    block_mig_state.blk_enable |= params->shared;
 }
 
 void blk_mig_init(void)
cpu-all.h

@@ -486,6 +486,7 @@ typedef struct RAMBlock {
 typedef struct RAMList {
     uint8_t *phys_dirty;
     QLIST_HEAD(, RAMBlock) blocks;
+    uint64_t dirty_pages;
 } RAMList;
 extern RAMList ram_list;
 
exec-obsolete.h

@@ -45,57 +45,71 @@ int cpu_physical_memory_set_dirty_tracking(int enable);
 #define CODE_DIRTY_FLAG      0x02
 #define MIGRATION_DIRTY_FLAG 0x08
 
-/* read dirty bit (return 0 or 1) */
-static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
-{
-    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
-}
-
 static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
 {
     return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
 }
 
+/* read dirty bit (return 0 or 1) */
+static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
+{
+    return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
+}
+
 static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 int dirty_flags)
 {
     int ret = 0;
-    uint8_t *p;
     ram_addr_t addr, end;
 
     end = TARGET_PAGE_ALIGN(start + length);
     start &= TARGET_PAGE_MASK;
-    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
     for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
-        ret |= *p++ & dirty_flags;
+        ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
     }
     return ret;
 }
 
-static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
-{
-    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
-}
-
 static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                       int dirty_flags)
 {
+    if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
+        !cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
+                                       MIGRATION_DIRTY_FLAG)) {
+        ram_list.dirty_pages++;
+    }
     return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
 }
 
+static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
+{
+    cpu_physical_memory_set_dirty_flags(addr, 0xff);
+}
+
 static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                         int dirty_flags)
 {
     int mask = ~dirty_flags;
+
+    if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
+        cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
+                                      MIGRATION_DIRTY_FLAG)) {
+        ram_list.dirty_pages--;
+    }
     return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
 {
-    uint8_t *p;
     ram_addr_t addr, end;
 
     end = TARGET_PAGE_ALIGN(start + length);
     start &= TARGET_PAGE_MASK;
-    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
     for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
-        *p++ |= dirty_flags;
+        cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
     }
 }
 
@@ -103,16 +117,12 @@ static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                         ram_addr_t length,
                                                         int dirty_flags)
 {
-    int mask;
-    uint8_t *p;
     ram_addr_t addr, end;
 
     end = TARGET_PAGE_ALIGN(start + length);
     start &= TARGET_PAGE_MASK;
-    mask = ~dirty_flags;
-    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
     for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
-        *p++ &= mask;
+        cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
     }
 }
 
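
The dirty_pages counter added to RAMList stays consistent because the set/clear helpers above only adjust it on an actual transition of the MIGRATION dirty bit. A self-contained toy model of that invariant, with plain arrays standing in for ram_list; only the 0x08 bit value is taken from the hunk above:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGES          8
    #define MIGRATION_FLAG 0x08   /* same bit value as MIGRATION_DIRTY_FLAG */

    static uint8_t dirty[PAGES];   /* stand-in for ram_list.phys_dirty      */
    static uint64_t dirty_pages;   /* stand-in for ram_list.dirty_pages     */

    static void set_dirty_flags(int page, int flags)
    {
        /* Count the page only when the migration bit goes 0 -> 1. */
        if ((flags & MIGRATION_FLAG) && !(dirty[page] & MIGRATION_FLAG)) {
            dirty_pages++;
        }
        dirty[page] |= flags;
    }

    static void clear_dirty_flags(int page, int flags)
    {
        /* Uncount the page only when the migration bit goes 1 -> 0. */
        if ((flags & MIGRATION_FLAG) && (dirty[page] & MIGRATION_FLAG)) {
            dirty_pages--;
        }
        dirty[page] &= ~flags;
    }

    int main(void)
    {
        set_dirty_flags(1, 0xff);
        set_dirty_flags(1, 0xff);             /* second set does not double count */
        set_dirty_flags(3, MIGRATION_FLAG);
        assert(dirty_pages == 2);

        clear_dirty_flags(1, MIGRATION_FLAG);
        clear_dirty_flags(1, MIGRATION_FLAG); /* second clear is a no-op */
        assert(dirty_pages == 1);

        printf("dirty_pages = %llu\n", (unsigned long long)dirty_pages);
        return 0;
    }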
exec.c (38 lines changed)
@@ -1824,19 +1824,10 @@ void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 }
 
-/* Note: start and end must be within the same ram block.  */
-void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
-                                     int dirty_flags)
+static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
+                                      uintptr_t length)
 {
-    uintptr_t length, start1;
-
-    start &= TARGET_PAGE_MASK;
-    end = TARGET_PAGE_ALIGN(end);
-
-    length = end - start;
-    if (length == 0)
-        return;
-    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
+    uintptr_t start1;
 
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
@@ -1848,6 +1839,26 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
         abort();
     }
     cpu_tlb_reset_dirty_all(start1, length);
+}
+
+/* Note: start and end must be within the same ram block.  */
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                     int dirty_flags)
+{
+    uintptr_t length;
+
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0)
+        return;
+    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
+
+    if (tcg_enabled()) {
+        tlb_reset_dirty_range_all(start, end, length);
+    }
 }
 
 int cpu_physical_memory_set_dirty_tracking(int enable)
@@ -2554,8 +2565,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
 
     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                     last_ram_offset() >> TARGET_PAGE_BITS);
-    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
-           0xff, size >> TARGET_PAGE_BITS);
+    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
 
     if (kvm_enabled())
         kvm_setup_guest_memory(new_block->host, size);
hmp.c (2 lines changed)
@@ -145,6 +145,8 @@ void hmp_info_migrate(Monitor *mon)
                        info->ram->remaining >> 10);
         monitor_printf(mon, "total ram: %" PRIu64 " kbytes\n",
                        info->ram->total >> 10);
+        monitor_printf(mon, "total time: %" PRIu64 " milliseconds\n",
+                       info->ram->total_time);
     }
 
     if (info->has_disk) {
migration.c (24 lines changed)
@@ -131,6 +131,8 @@ MigrationInfo *qmp_query_migrate(Error **errp)
         info->ram->transferred = ram_bytes_transferred();
         info->ram->remaining = ram_bytes_remaining();
         info->ram->total = ram_bytes_total();
+        info->ram->total_time = qemu_get_clock_ms(rt_clock)
+            - s->total_time;
 
         if (blk_mig_active()) {
             info->has_disk = true;
@@ -143,6 +145,13 @@ MigrationInfo *qmp_query_migrate(Error **errp)
     case MIG_STATE_COMPLETED:
         info->has_status = true;
         info->status = g_strdup("completed");
+
+        info->has_ram = true;
+        info->ram = g_malloc0(sizeof(*info->ram));
+        info->ram->transferred = ram_bytes_transferred();
+        info->ram->remaining = 0;
+        info->ram->total = ram_bytes_total();
+        info->ram->total_time = s->total_time;
         break;
     case MIG_STATE_ERROR:
         info->has_status = true;
@@ -260,6 +269,7 @@ static void migrate_fd_put_ready(void *opaque)
         } else {
             migrate_fd_completed(s);
         }
+        s->total_time = qemu_get_clock_ms(rt_clock) - s->total_time;
         if (s->state != MIG_STATE_COMPLETED) {
             if (old_vm_running) {
                 vm_start();
@@ -352,7 +362,7 @@ void migrate_fd_connect(MigrationState *s)
                                       migrate_fd_close);
 
     DPRINTF("beginning savevm\n");
-    ret = qemu_savevm_state_begin(s->file, s->blk, s->shared);
+    ret = qemu_savevm_state_begin(s->file, &s->params);
     if (ret < 0) {
         DPRINTF("failed, %d\n", ret);
         migrate_fd_error(s);
@@ -361,18 +371,18 @@ void migrate_fd_connect(MigrationState *s)
     migrate_fd_put_ready(s);
 }
 
-static MigrationState *migrate_init(int blk, int inc)
+static MigrationState *migrate_init(const MigrationParams *params)
 {
     MigrationState *s = migrate_get_current();
     int64_t bandwidth_limit = s->bandwidth_limit;
 
     memset(s, 0, sizeof(*s));
-    s->bandwidth_limit = bandwidth_limit;
-    s->blk = blk;
-    s->shared = inc;
+    s->params = *params;
 
     s->bandwidth_limit = bandwidth_limit;
     s->state = MIG_STATE_SETUP;
+    s->total_time = qemu_get_clock_ms(rt_clock);
 
     return s;
 }
@@ -394,9 +404,13 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
                  Error **errp)
 {
     MigrationState *s = migrate_get_current();
+    MigrationParams params;
     const char *p;
     int ret;
 
+    params.blk = blk;
+    params.shared = inc;
+
     if (s->state == MIG_STATE_ACTIVE) {
         error_set(errp, QERR_MIGRATION_ACTIVE);
         return;
@@ -411,7 +425,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
         return;
     }
 
-    s = migrate_init(blk, inc);
+    s = migrate_init(&params);
 
     if (strstart(uri, "tcp:", &p)) {
         ret = tcp_start_outgoing_migration(s, p, errp);
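
The total_time bookkeeping in the migration.c hunks records the start timestamp in migrate_init() and overwrites it with the elapsed duration once the migration finishes, so query-migrate can report a running elapsed time while active and the final duration after completion. A minimal sketch of that scheme; the clock source and names here are illustrative, not QEMU's:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t clock_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    struct toy_migration {
        int completed;
        int64_t total_time;   /* start stamp while running, duration afterwards */
    };

    static void toy_init(struct toy_migration *s)
    {
        s->completed = 0;
        s->total_time = clock_ms();                 /* as migrate_init() does       */
    }

    static void toy_complete(struct toy_migration *s)
    {
        s->total_time = clock_ms() - s->total_time; /* as migrate_fd_put_ready() does */
        s->completed = 1;
    }

    static int64_t toy_query_total_time(const struct toy_migration *s)
    {
        return s->completed ? s->total_time : clock_ms() - s->total_time;
    }

    int main(void)
    {
        struct toy_migration s;

        toy_init(&s);
        printf("elapsed so far: %lld ms\n", (long long)toy_query_total_time(&s));
        toy_complete(&s);
        printf("final duration: %lld ms\n", (long long)toy_query_total_time(&s));
        return 0;
    }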
migration.h

@@ -19,6 +19,11 @@
 #include "notify.h"
 #include "error.h"
 
+struct MigrationParams {
+    bool blk;
+    bool shared;
+};
+
 typedef struct MigrationState MigrationState;
 
 struct MigrationState
@@ -31,8 +36,8 @@ struct MigrationState
     int (*close)(MigrationState *s);
     int (*write)(MigrationState *s, const void *buff, size_t size);
     void *opaque;
-    int blk;
-    int shared;
+    MigrationParams params;
+    int64_t total_time;
 };
 
 void process_incoming_migration(QEMUFile *f);
qapi-schema.json

@@ -260,10 +260,15 @@
 #
 # @total: total amount of bytes involved in the migration process
 #
+# @total_time: total amount of ms since migration started.  If
+#        migration has ended, it returns the total migration
+#        time. (since 1.2)
+#
 # Since: 0.14.0.
 ##
 { 'type': 'MigrationStats',
-  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' } }
+  'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
+           'total_time': 'int' } }
 
 ##
 # @MigrationInfo
@@ -275,8 +280,9 @@
 # 'cancelled'. If this field is not returned, no migration process
 # has been initiated
 #
-# @ram: #optional @MigrationStats containing detailed migration status,
-#       only returned if status is 'active'
+# @ram: #optional @MigrationStats containing detailed migration
+#       status, only returned if status is 'active' or
+#       'completed'. 'comppleted' (since 1.2)
 #
 # @disk: #optional @MigrationStats containing detailed disk migration
 #        status, only returned if status is 'active' and it is a block
qemu-common.h

@@ -17,6 +17,7 @@ typedef struct DeviceState DeviceState;
 
 struct Monitor;
 typedef struct Monitor Monitor;
+typedef struct MigrationParams MigrationParams;
 
 /* we put basic includes here to avoid repeating them in device drivers */
 #include <stdlib.h>
savevm.c (24 lines changed)
@@ -85,6 +85,7 @@
 #include "cpus.h"
 #include "memory.h"
 #include "qmp-commands.h"
+#include "trace.h"
 
 #define SELF_ANNOUNCE_ROUNDS 5
 
@@ -1561,7 +1562,8 @@ bool qemu_savevm_state_blocked(Error **errp)
     return false;
 }
 
-int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared)
+int qemu_savevm_state_begin(QEMUFile *f,
+                            const MigrationParams *params)
 {
     SaveStateEntry *se;
     int ret;
@@ -1569,8 +1571,8 @@ int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared)
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
         if(se->set_params == NULL) {
             continue;
-	}
-	se->set_params(blk_enable, shared, se->opaque);
+        }
+        se->set_params(params, se->opaque);
     }
 
     qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
@@ -1624,11 +1626,17 @@ int qemu_savevm_state_iterate(QEMUFile *f)
         if (se->save_live_state == NULL)
            continue;
 
+        if (qemu_file_rate_limit(f)) {
+            return 0;
+        }
+        trace_savevm_section_start();
         /* Section type */
         qemu_put_byte(f, QEMU_VM_SECTION_PART);
         qemu_put_be32(f, se->section_id);
 
         ret = se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
+        trace_savevm_section_end(se->section_id);
+
         if (ret <= 0) {
             /* Do not proceed to the next vmstate before this one reported
                completion of the current stage. This serializes the migration
@@ -1658,11 +1666,13 @@ int qemu_savevm_state_complete(QEMUFile *f)
         if (se->save_live_state == NULL)
            continue;
 
+        trace_savevm_section_start();
         /* Section type */
         qemu_put_byte(f, QEMU_VM_SECTION_END);
         qemu_put_be32(f, se->section_id);
 
         ret = se->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);
+        trace_savevm_section_end(se->section_id);
         if (ret < 0) {
             return ret;
         }
@@ -1674,6 +1684,7 @@ int qemu_savevm_state_complete(QEMUFile *f)
         if (se->save_state == NULL && se->vmsd == NULL)
            continue;
 
+        trace_savevm_section_start();
         /* Section type */
         qemu_put_byte(f, QEMU_VM_SECTION_FULL);
         qemu_put_be32(f, se->section_id);
@@ -1687,6 +1698,7 @@ int qemu_savevm_state_complete(QEMUFile *f)
         qemu_put_be32(f, se->version_id);
 
         vmstate_save(f, se);
+        trace_savevm_section_end(se->section_id);
     }
 
     qemu_put_byte(f, QEMU_VM_EOF);
@@ -1708,13 +1720,17 @@ void qemu_savevm_state_cancel(QEMUFile *f)
 static int qemu_savevm_state(QEMUFile *f)
 {
     int ret;
+    MigrationParams params = {
+        .blk = 0,
+        .shared = 0
+    };
 
     if (qemu_savevm_state_blocked(NULL)) {
         ret = -EINVAL;
         goto out;
     }
 
-    ret = qemu_savevm_state_begin(f, 0, 0);
+    ret = qemu_savevm_state_begin(f, &params);
     if (ret < 0)
         goto out;
 
sysemu.h (3 lines changed)
@@ -77,7 +77,8 @@ void do_info_snapshots(Monitor *mon);
 void qemu_announce_self(void);
 
 bool qemu_savevm_state_blocked(Error **errp);
-int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared);
+int qemu_savevm_state_begin(QEMUFile *f,
+                            const MigrationParams *params);
 int qemu_savevm_state_iterate(QEMUFile *f);
 int qemu_savevm_state_complete(QEMUFile *f);
 void qemu_savevm_state_cancel(QEMUFile *f);
trace-events

@@ -864,6 +864,11 @@ displaysurface_resize(void *display_state, void *display_surface, int width, int
 # vga.c
 ppm_save(const char *filename, void *display_surface) "%s surface=%p"
 
+# savevm.c
+
+savevm_section_start(void) ""
+savevm_section_end(unsigned int section_id) "section_id %u"
+
 # hw/qxl.c
 disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d"
 disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u"
vmstate.h

@@ -26,7 +26,7 @@
 #ifndef QEMU_VMSTATE_H
 #define QEMU_VMSTATE_H 1
 
-typedef void SaveSetParamsHandler(int blk_enable, int shared, void * opaque);
+typedef void SaveSetParamsHandler(const MigrationParams *params, void * opaque);
 typedef void SaveStateHandler(QEMUFile *f, void *opaque);
 typedef int SaveLiveStateHandler(QEMUFile *f, int stage, void *opaque);
 typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);