Introduce qemu_put_ram_ptr
This function allows unlocking a ram_ptr given by qemu_get_ram_ptr. After a call to qemu_put_ram_ptr, the pointer may be unmapped from QEMU when used with Xen.

Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent ea6c5f8ffe
commit 050a0ddf39
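The intent of the new API is that every qemu_get_ram_ptr() is eventually balanced by a qemu_put_ram_ptr(), so that Xen builds can unlock or unmap the backing host mapping again. A minimal caller-side sketch of that pairing (the helper name and the precomputed ram_addr_t are illustrative only, not part of the patch):

/* Illustrative helper, not part of the patch: copy a small amount of guest
 * RAM (within one page) into a local buffer, then release the mapping. */
static void copy_from_guest_ram(ram_addr_t addr1, uint8_t *buf, size_t len)
{
    uint8_t *ptr = qemu_get_ram_ptr(addr1);   /* map (and lock) the guest page */

    memcpy(buf, ptr, len);
    qemu_put_ram_ptr(ptr);                    /* let Xen unlock/unmap it again */
}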
@@ -67,6 +67,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr);
 /* Same but slower, to use for migration, where the order of
  * RAMBlocks must not change. */
 void *qemu_safe_ram_ptr(ram_addr_t addr);
+void qemu_put_ram_ptr(void *addr);
 /* This should not be used by devices. */
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
exec.c
@@ -3110,6 +3110,27 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+void qemu_put_ram_ptr(void *addr)
+{
+    trace_qemu_put_ram_ptr(addr);
+
+    if (xen_mapcache_enabled()) {
+        RAMBlock *block;
+
+        QLIST_FOREACH(block, &ram_list.blocks, next) {
+            if (addr == block->host) {
+                break;
+            }
+        }
+        if (block && block->host) {
+            xen_unmap_block(block->host, block->length);
+            block->host = NULL;
+        } else {
+            qemu_map_cache_unlock(addr);
+        }
+    }
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
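On non-Xen builds the new function reduces to the trace point, since xen_mapcache_enabled() is false. On Xen, two cases are distinguished: if addr is the host address at which a whole RAMBlock is mapped (presumably via xen_map_block, whose unmap counterpart is called here), the block is unmapped outright and block->host is cleared, presumably so it gets remapped on the next access; otherwise the address is taken to come from the map cache and its per-page lock is dropped via qemu_map_cache_unlock(). Note that QLIST_FOREACH leaves block NULL when it exhausts the list without a match, which is what the block && block->host test relies on.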
@@ -3825,6 +3846,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                     cpu_physical_memory_set_dirty_flags(
                         addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
+                qemu_put_ram_ptr(ptr);
             }
         } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3852,9 +3874,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 }
             } else {
                 /* RAM case */
-                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
-                    (addr & ~TARGET_PAGE_MASK);
-                memcpy(buf, ptr, l);
+                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
+                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
+                qemu_put_ram_ptr(ptr);
             }
         }
         len -= l;
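The read path is rearranged, presumably so that ptr keeps the page-aligned address returned by qemu_get_ram_ptr() rather than an offset into the page: qemu_put_ram_ptr() needs the base pointer to find the matching RAMBlock or map-cache entry, so the in-page offset is now applied only inside the memcpy().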
@@ -3895,6 +3917,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
             /* ROM/RAM case */
             ptr = qemu_get_ram_ptr(addr1);
             memcpy(ptr, buf, l);
+            qemu_put_ram_ptr(ptr);
         }
         len -= l;
         buf += l;
@@ -4036,6 +4059,15 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                 access_len -= l;
             }
         }
+        if (xen_mapcache_enabled()) {
+            uint8_t *buffer1 = buffer;
+            uint8_t *end_buffer = buffer + len;
+
+            while (buffer1 < end_buffer) {
+                qemu_put_ram_ptr(buffer1);
+                buffer1 += TARGET_PAGE_SIZE;
+            }
+        }
         return;
     }
     if (is_write) {
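For context, this loop serves the map/unmap API used for DMA-style access: a caller maps a guest-physical range, works on the host pointer, then unmaps it. A rough sketch of such a caller, assuming the cpu_physical_memory_map() signature of this QEMU version and using a made-up consume_data() stand-in:

/* Rough sketch of a map/unmap user.  On Xen, the unmap now walks the mapped
 * range page by page and calls qemu_put_ram_ptr() on each page so the map
 * cache can drop its per-page locks.  consume_data() is a placeholder. */
static void read_guest_range(target_phys_addr_t guest_addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 0 /* read */);

    if (host) {
        consume_data(host, plen);                        /* hypothetical consumer */
        cpu_physical_memory_unmap(host, plen, 0, plen);  /* releases the page locks */
    }
}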
@@ -371,3 +371,6 @@ disable qemu_remap_bucket(uint64_t index) "index %#"PRIx64""
 disable qemu_map_cache_return(void* ptr) "%p"
 disable xen_map_block(uint64_t phys_addr, uint64_t size) "%#"PRIx64", size %#"PRIx64""
 disable xen_unmap_block(void* addr, unsigned long size) "%p, size %#lx"
+
+# exec.c
+disable qemu_put_ram_ptr(void* addr) "%p"
@@ -196,6 +196,39 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
     return mapcache->last_address_vaddr + address_offset;
 }
 
+void qemu_map_cache_unlock(void *buffer)
+{
+    MapCacheEntry *entry = NULL, *pentry = NULL;
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        return;
+    }
+    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && entry->paddr_index != paddr_index) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        return;
+    }
+    if (entry->lock > 0) {
+        entry->lock--;
+    }
+}
+
 ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
 {
     MapCacheRev *reventry;
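qemu_map_cache_unlock() is the counterpart of mapping a page through the Xen map cache with the lock flag set: it removes the reverse-lookup entry recorded for the locked mapping and decrements the lock count of the matching bucket entry. A small sketch of that pairing, assuming the third qemu_map_cache() parameter (cut off in the hunk header above) is the lock flag:

/* Sketch only: map one page through the Xen map cache with a lock, read a
 * value out of it, then release the lock with the new unlock helper.
 * Treating the third qemu_map_cache() argument as the lock flag is an
 * assumption based on the truncated prototype above. */
static uint64_t read_u64_via_mapcache(target_phys_addr_t phys_addr)
{
    uint8_t *p = qemu_map_cache(phys_addr, TARGET_PAGE_SIZE, 1 /* lock */);
    uint64_t val;

    memcpy(&val, p, sizeof(val));
    qemu_map_cache_unlock(p);   /* drop the reverse entry, decrement entry->lock */
    return val;
}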