mirror of https://github.com/xemu-project/xemu.git
migration: Do chunk page in postcopy_each_ram_send_discard()
Right now we loop over the ramblocks twice: the first time to chunk the dirty bits with huge page information, the second time to send the discard ranges. That's not necessary - we can do both in a single loop.
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
commit f30c2e5ba8 (parent e3fbf76021)
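For orientation, here is a minimal, self-contained sketch of the restructuring described above, using stand-in types and function names rather than the actual QEMU code (the real loops walk RAMBlocks with RAMBLOCK_FOREACH_NOT_IGNORED, and the per-block work is done by postcopy_chunk_hostpages_pass() and the discard-sending helpers in migration/ram.c):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for a RAMBlock; only the name matters for this sketch. */
typedef struct Block {
    const char *idstr;
} Block;

static Block blocks[] = { { "pc.ram" }, { "vga.vram" } };
#define N_BLOCKS (sizeof(blocks) / sizeof(blocks[0]))

/* First pass: fold huge-page information into the block's dirty bitmap
 * (stand-in for postcopy_chunk_hostpages_pass()). */
static void chunk_hostpages(Block *b)
{
    printf("chunk dirty bits for %s\n", b->idstr);
}

/* Second pass: send the discard ranges for the block
 * (stand-in for the discard-sending path). */
static void send_discards(Block *b)
{
    printf("send discards for %s\n", b->idstr);
}

/* Before this commit: two separate walks over all ramblocks. */
static void two_loops(void)
{
    for (size_t i = 0; i < N_BLOCKS; i++) {
        chunk_hostpages(&blocks[i]);
    }
    for (size_t i = 0; i < N_BLOCKS; i++) {
        send_discards(&blocks[i]);
    }
}

/* After this commit: one walk does both steps per ramblock. */
static void one_loop(void)
{
    for (size_t i = 0; i < N_BLOCKS; i++) {
        chunk_hostpages(&blocks[i]);
        send_discards(&blocks[i]);
    }
}

int main(void)
{
    two_loops();
    one_loop();
    return 0;
}

The work done per block is identical in both shapes; the change only removes the second walk over the ramblock list, which is what the diff below implements.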
@@ -2454,6 +2454,8 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
     return 0;
 }
 
+static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
+
 /**
  * postcopy_each_ram_send_discard: discard all RAMBlocks
  *
@@ -2475,6 +2477,14 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         postcopy_discard_send_init(ms, block->idstr);
 
+        /*
+         * Deal with TPS != HPS and huge pages. It discard any partially sent
+         * host-page size chunks, mark any partially dirty host-page size
+         * chunks as all dirty. In this case the host-page is the host-page
+         * for the particular RAMBlock, i.e. it might be a huge page.
+         */
+        postcopy_chunk_hostpages_pass(ms, block);
+
         /*
          * Postcopy sends chunks of bitmap over the wire, but it
          * just needs indexes at this point, avoids it having
@@ -2575,7 +2585,6 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
     RAMState *rs = ram_state;
-    RAMBlock *block;
 
     RCU_READ_LOCK_GUARD();
 
@@ -2587,15 +2596,6 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     rs->last_sent_block = NULL;
     rs->last_page = 0;
 
-    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        /*
-         * Deal with TPS != HPS and huge pages. It discard any partially sent
-         * host-page size chunks, mark any partially dirty host-page size
-         * chunks as all dirty. In this case the host-page is the host-page
-         * for the particular RAMBlock, i.e. it might be a huge page.
-         */
-        postcopy_chunk_hostpages_pass(ms, block);
-    }
     trace_ram_postcopy_send_discard_bitmap();
 
     return postcopy_each_ram_send_discard(ms);
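The comment moved by this patch refers to TPS (target page size) != HPS (host page size). As a rough illustration of what the chunking pass conceptually does, here is a toy sketch that marks any partially dirty host page as fully dirty, so the whole host page is later discarded and re-sent. It uses a plain byte-per-page array instead of QEMU's real bitmap helpers, and all names in it are invented for the example:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * dirty[]     : one flag per target page (true = dirty)
 * npages      : number of target pages
 * host_ratio  : host page size divided by target page size
 */
static void chunk_partially_dirty_hostpages(bool *dirty, size_t npages,
                                            size_t host_ratio)
{
    for (size_t start = 0; start + host_ratio <= npages; start += host_ratio) {
        bool any_dirty = false;

        for (size_t i = 0; i < host_ratio; i++) {
            if (dirty[start + i]) {
                any_dirty = true;
                break;
            }
        }
        /* A partially dirty host page becomes fully dirty. */
        if (any_dirty) {
            for (size_t i = 0; i < host_ratio; i++) {
                dirty[start + i] = true;
            }
        }
    }
}

int main(void)
{
    /* Host page 0 (pages 0-3) is partially dirty, host page 1 is clean. */
    bool dirty[8] = { false, true, false, false,
                      false, false, false, false };

    chunk_partially_dirty_hostpages(dirty, 8, 4);

    for (size_t i = 0; i < 8; i++) {
        printf("%d", dirty[i]);
    }
    printf("\n");   /* prints 11110000 */
    return 0;
}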