migration: Add an ability to ignore shared RAM blocks

If the ignore-shared capability is set, skip shared RAMBlocks during RAM
migration. Also, move qemu_ram_foreach_migratable_block to the migration
code (renaming it to foreach_not_ignored_block), because it requires
access to the migration capabilities.

Signed-off-by: Yury Kotov <yury-kotov@yandex-team.ru>
Message-Id: <20190215174548.2630-4-yury-kotov@yandex-team.ru>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
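The skip decision below hinges on a capability accessor, migrate_ignore_shared(), introduced elsewhere in this series. As a rough sketch of what such an accessor looks like (the enum constant MIGRATION_CAPABILITY_X_IGNORE_SHARED is assumed from the series' x-ignore-shared capability and may not match the tree exactly):

bool migrate_ignore_shared(void)
{
    /* Assumed shape: read the flag off the global migration state. */
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
}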
parent 18269069c3
commit fbd162e629
exec.c (19 changed lines)
@@ -3985,25 +3985,6 @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
     return ret;
 }
 
-int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
-{
-    RAMBlock *block;
-    int ret = 0;
-
-    rcu_read_lock();
-    RAMBLOCK_FOREACH(block) {
-        if (!qemu_ram_is_migratable(block)) {
-            continue;
-        }
-        ret = func(block, opaque);
-        if (ret) {
-            break;
-        }
-    }
-    rcu_read_unlock();
-    return ret;
-}
-
 /*
  * Unmap pages of memory from start to start+length such that
  * they a) read as 0, b) Trigger whatever fault mechanism
include/exec/cpu-common.h

@@ -122,7 +122,6 @@ extern struct MemoryRegion io_mem_notdirty;
 typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
 
 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
-int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque);
 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
 
 #endif
migration/migration.h

@@ -306,8 +306,10 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
 void dirty_bitmap_mig_before_vm_start(void);
 void init_dirty_bitmap_incoming_migration(void);
 
+int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);
+
 #define qemu_ram_foreach_block \
-  #warning "Use qemu_ram_foreach_block_migratable in migration code"
+  #warning "Use foreach_not_ignored_block in migration code"
 
 void migration_make_urgent_request(void);
 void migration_consume_urgent_request(void);
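The redefinition of qemu_ram_foreach_block above is a compile-time tripwire, not a usable macro: any migration source file that still spells qemu_ram_foreach_block has the identifier expand to a token sequence containing '#', which is invalid outside a preprocessor directive, so the build fails at the offending call site. A standalone illustration of the same trick, with hypothetical names:

/* Poison a legacy identifier for this translation unit. */
#define legacy_block_walk \
    #warning "Use new_block_walk instead"

/* A later call such as
 *     legacy_block_walk(my_callback, NULL);
 * now expands to '#warning ...' tokens and fails to compile, with the
 * error pointing at the replacement text above. */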
migration/postcopy-ram.c

@@ -374,7 +374,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
     }
 
     /* We don't support postcopy with shared RAM yet */
-    if (qemu_ram_foreach_migratable_block(test_ramblock_postcopiable, NULL)) {
+    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
         goto out;
     }
 
@@ -508,7 +508,7 @@ static int cleanup_range(RAMBlock *rb, void *opaque)
  */
 int postcopy_ram_incoming_init(MigrationIncomingState *mis)
 {
-    if (qemu_ram_foreach_migratable_block(init_range, NULL)) {
+    if (foreach_not_ignored_block(init_range, NULL)) {
         return -1;
     }
 
@@ -550,7 +550,7 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
         return -1;
     }
 
-    if (qemu_ram_foreach_migratable_block(cleanup_range, mis)) {
+    if (foreach_not_ignored_block(cleanup_range, mis)) {
         return -1;
     }
 
@@ -617,7 +617,7 @@ static int nhp_range(RAMBlock *rb, void *opaque)
  */
 int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
 {
-    if (qemu_ram_foreach_migratable_block(nhp_range, mis)) {
+    if (foreach_not_ignored_block(nhp_range, mis)) {
         return -1;
     }
 
@@ -628,7 +628,7 @@ int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
 
 /*
  * Mark the given area of RAM as requiring notification to unwritten areas
- * Used as a callback on qemu_ram_foreach_migratable_block.
+ * Used as a callback on foreach_not_ignored_block.
  * host_addr: Base of area to mark
  * offset: Offset in the whole ram arena
  * length: Length of the section
@@ -1122,7 +1122,7 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
     mis->have_fault_thread = true;
 
     /* Mark so that we get notified of accesses to unwritten areas */
-    if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
+    if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
         error_report("ram_block_enable_notify failed");
         return -1;
     }
migration/ram.c (110 changed lines)
@@ -159,18 +159,44 @@ out:
     return ret;
 }
 
+static bool ramblock_is_ignored(RAMBlock *block)
+{
+    return !qemu_ram_is_migratable(block) ||
+           (migrate_ignore_shared() && qemu_ram_is_shared(block));
+}
+
 /* Should be holding either ram_list.mutex, or the RCU lock. */
+#define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
+    INTERNAL_RAMBLOCK_FOREACH(block)                   \
+        if (ramblock_is_ignored(block)) {} else
+
 #define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
     INTERNAL_RAMBLOCK_FOREACH(block)                   \
         if (!qemu_ram_is_migratable(block)) {} else
 
 #undef RAMBLOCK_FOREACH
 
+int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
+{
+    RAMBlock *block;
+    int ret = 0;
+
+    rcu_read_lock();
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+        ret = func(block, opaque);
+        if (ret) {
+            break;
+        }
+    }
+    rcu_read_unlock();
+    return ret;
+}
+
 static void ramblock_recv_map_init(void)
 {
     RAMBlock *rb;
 
-    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
         assert(!rb->receivedmap);
         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
     }
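Note the shape of RAMBLOCK_FOREACH_NOT_IGNORED: ending the macro with if (ramblock_is_ignored(block)) {} else makes the caller's loop body the else branch, so ignored blocks fall into the empty {} arm and are skipped, while the macro still behaves like a plain statement prefix and cannot steal an else from surrounding code. The same filtering idiom, reduced to a self-contained example with hypothetical names:

#include <stdio.h>

/* Iterate an array but hand the caller's block only the even entries;
 * odd entries are swallowed by the empty {} branch. */
#define FOREACH_EVEN(arr, n, i)                 \
    for (int i = 0; i < (n); i++)               \
        if ((arr)[i] % 2 != 0) {} else

int main(void)
{
    int v[] = {1, 2, 3, 4, 5, 6};

    FOREACH_EVEN(v, 6, i) {
        printf("%d\n", v[i]);   /* prints 2, 4 and 6 */
    }
    return 0;
}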
@@ -1545,7 +1571,7 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
     unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
-    if (!qemu_ram_is_migratable(rb)) {
+    if (ramblock_is_ignored(rb)) {
         return size;
     }
 
@@ -1594,7 +1620,7 @@ uint64_t ram_pagesize_summary(void)
     RAMBlock *block;
     uint64_t summary = 0;
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         summary |= block->page_size;
     }
 
@@ -1664,7 +1690,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
     qemu_mutex_lock(&rs->bitmap_mutex);
     rcu_read_lock();
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         migration_bitmap_sync_range(rs, block, 0, block->used_length);
     }
     ram_counters.remaining = ram_bytes_remaining();
@@ -2388,7 +2414,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
     size_t pagesize_bits =
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
-    if (!qemu_ram_is_migratable(pss->block)) {
+    if (ramblock_is_ignored(pss->block)) {
         error_report("block %s should not be migrated !", pss->block->idstr);
         return 0;
     }
 
@@ -2486,19 +2512,30 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
     }
 }
 
-uint64_t ram_bytes_total(void)
+static uint64_t ram_bytes_total_common(bool count_ignored)
 {
     RAMBlock *block;
     uint64_t total = 0;
 
     rcu_read_lock();
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
-        total += block->used_length;
+    if (count_ignored) {
+        RAMBLOCK_FOREACH_MIGRATABLE(block) {
+            total += block->used_length;
+        }
+    } else {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+            total += block->used_length;
+        }
     }
     rcu_read_unlock();
     return total;
 }
 
+uint64_t ram_bytes_total(void)
+{
+    return ram_bytes_total_common(false);
+}
+
 static void xbzrle_load_setup(void)
 {
     XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
@@ -2547,7 +2584,7 @@ static void ram_save_cleanup(void *opaque)
      */
     memory_global_dirty_log_stop();
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         g_free(block->bmap);
         block->bmap = NULL;
         g_free(block->unsentmap);
@@ -2610,7 +2647,7 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
     struct RAMBlock *block;
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         unsigned long *bitmap = block->bmap;
         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
@@ -2688,7 +2725,7 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     struct RAMBlock *block;
     int ret;
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         PostcopyDiscardState *pds =
             postcopy_discard_send_init(ms, block->idstr);
 
@@ -2896,7 +2933,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     rs->last_sent_block = NULL;
     rs->last_page = 0;
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
         unsigned long *bitmap = block->bmap;
         unsigned long *unsentmap = block->unsentmap;
@@ -3062,7 +3099,7 @@ static void ram_list_init_bitmaps(void)
 
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        RAMBLOCK_FOREACH_MIGRATABLE(block) {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             pages = block->max_length >> TARGET_PAGE_BITS;
             block->bmap = bitmap_new(pages);
             bitmap_set(block->bmap, 0, pages);
@@ -3117,7 +3154,7 @@ static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
      * about dirty page logging as well.
      */
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         pages += bitmap_count_one(block->bmap,
                                   block->used_length >> TARGET_PAGE_BITS);
     }
@@ -3176,7 +3213,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 
     rcu_read_lock();
 
-    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
+    qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
 
     RAMBLOCK_FOREACH_MIGRATABLE(block) {
         qemu_put_byte(f, strlen(block->idstr));
@@ -3185,6 +3222,10 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
         if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
             qemu_put_be64(f, block->page_size);
         }
+        if (migrate_ignore_shared()) {
+            qemu_put_be64(f, block->mr->addr);
+            qemu_put_byte(f, ramblock_is_ignored(block) ? 1 : 0);
+        }
     }
 
     rcu_read_unlock();
@@ -3443,7 +3484,7 @@ static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
         return NULL;
     }
 
-    if (!qemu_ram_is_migratable(block)) {
+    if (ramblock_is_ignored(block)) {
         error_report("block %s should not be migrated !", id);
         return NULL;
     }
@@ -3698,7 +3739,7 @@ int colo_init_ram_cache(void)
     RAMBlock *block;
 
     rcu_read_lock();
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         block->colo_cache = qemu_anon_ram_alloc(block->used_length,
                                                 NULL,
                                                 false);
@@ -3719,7 +3760,7 @@ int colo_init_ram_cache(void)
     if (ram_bytes_total()) {
         RAMBlock *block;
 
-        RAMBLOCK_FOREACH_MIGRATABLE(block) {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
 
             block->bmap = bitmap_new(pages);
@@ -3734,7 +3775,7 @@ int colo_init_ram_cache(void)
 
 out_locked:
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         if (block->colo_cache) {
             qemu_anon_ram_free(block->colo_cache, block->used_length);
             block->colo_cache = NULL;
@@ -3751,14 +3792,14 @@ void colo_release_ram_cache(void)
     RAMBlock *block;
 
     memory_global_dirty_log_stop();
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         g_free(block->bmap);
         block->bmap = NULL;
     }
 
     rcu_read_lock();
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         if (block->colo_cache) {
             qemu_anon_ram_free(block->colo_cache, block->used_length);
             block->colo_cache = NULL;
@@ -3794,7 +3835,7 @@ static int ram_load_cleanup(void *opaque)
 {
     RAMBlock *rb;
 
-    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
         if (ramblock_is_pmem(rb)) {
             pmem_persist(rb->host, rb->used_length);
         }
@@ -3803,7 +3844,7 @@ static int ram_load_cleanup(void *opaque)
     xbzrle_load_cleanup();
     compress_threads_load_cleanup();
 
-    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
         g_free(rb->receivedmap);
         rb->receivedmap = NULL;
     }
@@ -4003,7 +4044,7 @@ static void colo_flush_ram_cache(void)
 
     memory_global_dirty_log_sync();
     rcu_read_lock();
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
     }
     rcu_read_unlock();
@@ -4146,6 +4187,23 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                     ret = -EINVAL;
                 }
             }
+            if (migrate_ignore_shared()) {
+                hwaddr addr = qemu_get_be64(f);
+                bool ignored = qemu_get_byte(f);
+                if (ignored != ramblock_is_ignored(block)) {
+                    error_report("RAM block %s should %s be migrated",
+                                 id, ignored ? "" : "not");
+                    ret = -EINVAL;
+                }
+                if (ramblock_is_ignored(block) &&
+                    block->mr->addr != addr) {
+                    error_report("Mismatched GPAs for block %s "
+                                 "%" PRId64 "!= %" PRId64,
+                                 id, (uint64_t)addr,
+                                 (uint64_t)block->mr->addr);
+                    ret = -EINVAL;
+                }
+            }
             ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                   block->idstr);
         } else {
@@ -4216,7 +4274,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 static bool ram_has_postcopy(void *opaque)
 {
     RAMBlock *rb;
-    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
         if (ramblock_is_pmem(rb)) {
             info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
                         "is not supported now!", rb->idstr, rb->host);
@@ -4236,7 +4294,7 @@ static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
 
     trace_ram_dirty_bitmap_sync_start();
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         qemu_savevm_send_recv_bitmap(file, block->idstr);
         trace_ram_dirty_bitmap_request(block->idstr);
         ramblock_count++;
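Taken together, the ram_save_setup and ram_load hunks above extend the per-block record of the RAM_SAVE_FLAG_MEM_SIZE section and verify it on the destination. A sketch of the resulting record layout (the used_length field is an assumption, emitted by ram_save_setup code outside the hunks shown):

/*
 * Per-RAMBlock record in the MEM_SIZE section after this patch:
 *   u8    len          strlen(block->idstr)
 *   len   idstr        block name, not NUL-terminated
 *   be64  used_length  assumed; written outside the hunks shown
 *   be64  page_size    only when postcopy-ram is on and the block's
 *                      page size differs from the host page size
 *   be64  mr->addr     only when ignore-shared is negotiated
 *   u8    ignored      only when ignore-shared is negotiated; must match
 *                      the destination's own ramblock_is_ignored()
 *                      verdict, and for ignored blocks the GPA must
 *                      match too, otherwise ram_load fails with -EINVAL
 */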
migration/rdma.c

@@ -644,7 +644,7 @@ static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
 
     assert(rdma->blockmap == NULL);
     memset(local, 0, sizeof *local);
-    qemu_ram_foreach_migratable_block(qemu_rdma_init_one_block, rdma);
+    foreach_not_ignored_block(qemu_rdma_init_one_block, rdma);
     trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
     rdma->dest_blocks = g_new0(RDMADestBlock,
                                rdma->local_ram_blocks.nb_blocks);
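Like the rdma.c call above, any RAMBlockIterFunc passed to foreach_not_ignored_block() sees only blocks that will actually be sent, and a non-zero return stops the walk. A hypothetical callback, just to show the contract (assumes the migration/ram.c context where RAMBlock fields are visible):

/* Hypothetical helper: total bytes the RAM migration will transfer. */
static int add_block_size(RAMBlock *rb, void *opaque)
{
    *(uint64_t *)opaque += rb->used_length;
    return 0;   /* non-zero would abort the iteration */
}

static uint64_t migratable_bytes(void)
{
    uint64_t total = 0;

    foreach_not_ignored_block(add_block_size, &total);
    return total;
}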