mirror of https://github.com/xqemu/xqemu.git
xen mapcache: check if memory region has moved.
This patch changes the xen_map_cache behavior. Before trying to map a guest addr, the mapcache will look into the list of address ranges that have been moved (physmap/set_memory). There is currently one memory region like this, the vram, "moved" from where it is allocated to where the guest will look for it. This helps to achieve a successful migration. Signed-off-by: Anthony PERARD <anthony.perard@citrix.com> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
This commit is contained in:
parent
d1814e08c0
commit
cd1ba7de23
18
xen-all.c
18
xen-all.c
|
@ -225,6 +225,22 @@ static XenPhysmap *get_physmapping(XenIOState *state,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static target_phys_addr_t xen_phys_offset_to_gaddr(target_phys_addr_t start_addr,
|
||||||
|
ram_addr_t size, void *opaque)
|
||||||
|
{
|
||||||
|
target_phys_addr_t addr = start_addr & TARGET_PAGE_MASK;
|
||||||
|
XenIOState *xen_io_state = opaque;
|
||||||
|
XenPhysmap *physmap = NULL;
|
||||||
|
|
||||||
|
QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
|
||||||
|
if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
|
||||||
|
return physmap->start_addr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return start_addr;
|
||||||
|
}
|
||||||
|
|
||||||
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
|
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
|
||||||
static int xen_add_to_physmap(XenIOState *state,
|
static int xen_add_to_physmap(XenIOState *state,
|
||||||
target_phys_addr_t start_addr,
|
target_phys_addr_t start_addr,
|
||||||
|
@ -1043,7 +1059,7 @@ int xen_hvm_init(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Init RAM management */
|
/* Init RAM management */
|
||||||
xen_map_cache_init();
|
xen_map_cache_init(xen_phys_offset_to_gaddr, state);
|
||||||
xen_ram_init(ram_size);
|
xen_ram_init(ram_size);
|
||||||
|
|
||||||
qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
|
qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
|
||||||
|
|
|
@ -78,6 +78,9 @@ typedef struct MapCache {
|
||||||
uint8_t *last_address_vaddr;
|
uint8_t *last_address_vaddr;
|
||||||
unsigned long max_mcache_size;
|
unsigned long max_mcache_size;
|
||||||
unsigned int mcache_bucket_shift;
|
unsigned int mcache_bucket_shift;
|
||||||
|
|
||||||
|
phys_offset_to_gaddr_t phys_offset_to_gaddr;
|
||||||
|
void *opaque;
|
||||||
} MapCache;
|
} MapCache;
|
||||||
|
|
||||||
static MapCache *mapcache;
|
static MapCache *mapcache;
|
||||||
|
@ -91,13 +94,16 @@ static inline int test_bits(int nr, int size, const unsigned long *addr)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void xen_map_cache_init(void)
|
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
|
||||||
{
|
{
|
||||||
unsigned long size;
|
unsigned long size;
|
||||||
struct rlimit rlimit_as;
|
struct rlimit rlimit_as;
|
||||||
|
|
||||||
mapcache = g_malloc0(sizeof (MapCache));
|
mapcache = g_malloc0(sizeof (MapCache));
|
||||||
|
|
||||||
|
mapcache->phys_offset_to_gaddr = f;
|
||||||
|
mapcache->opaque = opaque;
|
||||||
|
|
||||||
QTAILQ_INIT(&mapcache->locked_entries);
|
QTAILQ_INIT(&mapcache->locked_entries);
|
||||||
mapcache->last_address_index = -1;
|
mapcache->last_address_index = -1;
|
||||||
|
|
||||||
|
@ -193,9 +199,14 @@ uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
|
||||||
uint8_t lock)
|
uint8_t lock)
|
||||||
{
|
{
|
||||||
MapCacheEntry *entry, *pentry = NULL;
|
MapCacheEntry *entry, *pentry = NULL;
|
||||||
target_phys_addr_t address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
|
target_phys_addr_t address_index;
|
||||||
target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
|
target_phys_addr_t address_offset;
|
||||||
target_phys_addr_t __size = size;
|
target_phys_addr_t __size = size;
|
||||||
|
bool translated = false;
|
||||||
|
|
||||||
|
tryagain:
|
||||||
|
address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
|
||||||
|
address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
|
||||||
|
|
||||||
trace_xen_map_cache(phys_addr);
|
trace_xen_map_cache(phys_addr);
|
||||||
|
|
||||||
|
@ -237,6 +248,11 @@ uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
|
||||||
if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
|
if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
|
||||||
entry->valid_mapping)) {
|
entry->valid_mapping)) {
|
||||||
mapcache->last_address_index = -1;
|
mapcache->last_address_index = -1;
|
||||||
|
if (!translated && mapcache->phys_offset_to_gaddr) {
|
||||||
|
phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
|
||||||
|
translated = true;
|
||||||
|
goto tryagain;
|
||||||
|
}
|
||||||
trace_xen_map_cache_return(NULL);
|
trace_xen_map_cache_return(NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,9 +11,13 @@
|
||||||
|
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
typedef target_phys_addr_t (*phys_offset_to_gaddr_t)(target_phys_addr_t start_addr,
|
||||||
|
ram_addr_t size,
|
||||||
|
void *opaque);
|
||||||
#ifdef CONFIG_XEN
|
#ifdef CONFIG_XEN
|
||||||
|
|
||||||
void xen_map_cache_init(void);
|
void xen_map_cache_init(phys_offset_to_gaddr_t f,
|
||||||
|
void *opaque);
|
||||||
uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
|
uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
|
||||||
uint8_t lock);
|
uint8_t lock);
|
||||||
ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
|
ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
|
||||||
|
@ -22,7 +26,8 @@ void xen_invalidate_map_cache(void);
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
static inline void xen_map_cache_init(void)
|
static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
|
||||||
|
void *opaque)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue