mirror of https://github.com/xemu-project/xemu.git
ram: Use ramblock and page offset instead of absolute offset
This removes the need to also pass the absolute offset.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
parent a935e30fbb
commit f20e286516
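For orientation before the diff: the patch changes the RAM save path to pass a (RAMBlock, page offset) pair instead of an absolute page number, and derives the absolute bitmap index only at the few places that actually touch the dirty/unsent bitmaps. Below is a minimal, self-contained sketch of that conversion; the stand-in type, constant value, and helper name are illustrative only, not QEMU API (the patch open-codes the same arithmetic, (rb->offset >> TARGET_PAGE_BITS) + page, in migration_bitmap_clear_dirty and ram_save_target_page).

    #include <stdio.h>

    /* Illustrative stand-ins; in QEMU these come from exec/ headers and the
     * page size is target dependent. */
    #define TARGET_PAGE_BITS 12

    typedef struct RAMBlockStub {
        unsigned long offset;   /* byte offset of the block in ram_addr_t space */
    } RAMBlockStub;

    /* Hypothetical helper: the absolute page number used to index the migration
     * dirty bitmap, derived from a block and a page offset within that block. */
    static unsigned long page_abs_from_block(const RAMBlockStub *rb,
                                             unsigned long page)
    {
        return (rb->offset >> TARGET_PAGE_BITS) + page;
    }

    int main(void)
    {
        RAMBlockStub rb = { .offset = 0x40000000UL };  /* block starts at 1 GiB */

        /* Page 5 within this block maps to absolute page 0x40005. */
        printf("page_abs = 0x%lx\n", page_abs_from_block(&rb, 5));
        return 0;
    }

Keeping callers in (block, page) form lets ram_save_host_page simply advance pss->page instead of also carrying a parallel page_abs counter, which is what the hunks below remove.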
@@ -609,12 +609,10 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
  * @rs: current RAM state
  * @rb: RAMBlock where to search for dirty pages
  * @start: page where we start the search
- * @page_abs: pointer into where to store the dirty page
  */
 static inline
 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
-                                          unsigned long start,
-                                          unsigned long *page_abs)
+                                          unsigned long start)
 {
     unsigned long base = rb->offset >> TARGET_PAGE_BITS;
     unsigned long nr = base + start;
@@ -631,17 +629,18 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
         next = find_next_bit(bitmap, size, nr);
     }
 
-    *page_abs = next;
     return next - base;
 }
 
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
-                                                unsigned long page_abs)
+                                                RAMBlock *rb,
+                                                unsigned long page)
 {
     bool ret;
     unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
+    unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page;
 
-    ret = test_and_clear_bit(page_abs, bitmap);
+    ret = test_and_clear_bit(nr, bitmap);
 
     if (ret) {
         rs->migration_dirty_pages--;
@@ -1053,13 +1052,10 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
  * @rs: current RAM state
  * @pss: data about the state of the current dirty page scan
  * @again: set to false if the search has scanned the whole of RAM
- * @page_abs: pointer into where to store the dirty page
  */
-static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss,
-                             bool *again, unsigned long *page_abs)
+static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
 {
-    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page,
-                                            page_abs);
+    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
     if (pss->complete_round && pss->block == rs->last_seen_block &&
         pss->page >= rs->last_page) {
         /*
@@ -1106,10 +1102,8 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss,
  *
  * @rs: current RAM state
  * @offset: used to return the offset within the RAMBlock
- * @page_abs: pointer into where to store the dirty page
  */
-static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
-                              unsigned long *page_abs)
+static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
 {
     RAMBlock *block = NULL;
 
@@ -1119,7 +1113,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
             QSIMPLEQ_FIRST(&rs->src_page_requests);
         block = entry->rb;
         *offset = entry->offset;
-        *page_abs = (entry->offset + entry->rb->offset) >> TARGET_PAGE_BITS;
 
         if (entry->len > TARGET_PAGE_SIZE) {
             entry->len -= TARGET_PAGE_SIZE;
@@ -1144,17 +1137,15 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
  *
  * @rs: current RAM state
  * @pss: data about the state of the current dirty page scan
- * @page_abs: pointer into where to store the dirty page
  */
-static bool get_queued_page(RAMState *rs, PageSearchStatus *pss,
-                            unsigned long *page_abs)
+static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
 {
     RAMBlock *block;
     ram_addr_t offset;
     bool dirty;
 
     do {
-        block = unqueue_page(rs, &offset, page_abs);
+        block = unqueue_page(rs, &offset);
         /*
          * We're sending this page, and since it's postcopy nothing else
          * will dirty it, and we must make sure it doesn't get sent again
@@ -1163,16 +1154,18 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss,
          */
         if (block) {
             unsigned long *bitmap;
+            unsigned long page;
+
             bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-            dirty = test_bit(*page_abs, bitmap);
+            page = (block->offset + offset) >> TARGET_PAGE_BITS;
+            dirty = test_bit(page, bitmap);
             if (!dirty) {
                 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
-                                                *page_abs,
-                                                test_bit(*page_abs,
+                                                page,
+                                                test_bit(page,
                         atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
             } else {
-                trace_get_queued_page(block->idstr, (uint64_t)offset,
-                                      *page_abs);
+                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
             }
         }
 
@@ -1300,22 +1293,22 @@ err:
  * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
- * @page_abs: page number of the dirty page
  */
 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
-                                bool last_stage, unsigned long page_abs)
+                                bool last_stage)
 {
     int res = 0;
 
     /* Check the pages is dirty and if it is send it */
-    if (migration_bitmap_clear_dirty(rs, page_abs)) {
+    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
         unsigned long *unsentmap;
         /*
          * If xbzrle is on, stop using the data compression after first
          * round of migration even if compression is enabled. In theory,
          * xbzrle can do better than compression.
          */
+        unsigned long page =
+            (pss->block->offset >> TARGET_PAGE_BITS) + pss->page;
         if (migrate_use_compression()
             && (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
             res = ram_save_compressed_page(rs, pss, last_stage);
@@ -1328,7 +1321,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
         }
         unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
         if (unsentmap) {
-            clear_bit(page_abs, unsentmap);
+            clear_bit(page, unsentmap);
         }
     }
 
@@ -1350,25 +1343,22 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
  * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
- * @page_abs: Page number of the dirty page
  */
 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
-                              bool last_stage,
-                              unsigned long page_abs)
+                              bool last_stage)
 {
     int tmppages, pages = 0;
     size_t pagesize_bits =
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
     do {
-        tmppages = ram_save_target_page(rs, pss, last_stage, page_abs);
+        tmppages = ram_save_target_page(rs, pss, last_stage);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
         pss->page++;
-        page_abs++;
     } while (pss->page & (pagesize_bits - 1));
 
     /* The offset we leave with is the last one we looked at */
@@ -1395,7 +1385,6 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
     PageSearchStatus pss;
     int pages = 0;
     bool again, found;
-    unsigned long page_abs; /* Page number of the dirty page */
 
     /* No dirty page as there is zero RAM */
     if (!ram_bytes_total()) {
@@ -1412,15 +1401,15 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
 
     do {
         again = true;
-        found = get_queued_page(rs, &pss, &page_abs);
+        found = get_queued_page(rs, &pss);
 
         if (!found) {
             /* priority queue empty, so just search for something dirty */
-            found = find_dirty_block(rs, &pss, &again, &page_abs);
+            found = find_dirty_block(rs, &pss, &again);
         }
 
         if (found) {
-            pages = ram_save_host_page(rs, &pss, last_stage, page_abs);
+            pages = ram_save_host_page(rs, &pss, last_stage);
         }
     } while (!pages && again);
 