/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include <sys/resource.h>

#include "hw/xen/xen_backend.h"
#include "sysemu/blockdev.h"
#include "qemu/bitmap.h"

#include <xen/hvm/params.h>

#include "sysemu/xen-mapcache.h"
#include "trace.h"

//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if HOST_LONG_BITS == 32
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#else
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
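
/*
 * With MCACHE_BUCKET_SHIFT of 20, a bucket spans 1MB of guest-physical
 * address space, i.e. 256 4KB frames (64KB and 16 frames on 32-bit
 * hosts).  MCACHE_MAX_SIZE caps the total virtual address space the
 * cache may occupy.
 */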

/* This is the size of the virtual address space reserved to QEMU that will
 * not be used by the MapCache.
 * Empirically, QEMU was observed to use about 75MB more than
 * max_mcache_size, hence the 80MB reservation.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)
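
/*
 * One mapped bucket: the bucket index (paddr_index), the virtual address
 * and size of the mapping, a lock count for long-term users, and a
 * bitmap of which pages in the bucket were successfully mapped.  Entries
 * that hash to the same slot are chained through "next".
 */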
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
    /*
     * Set for locked mappings created by dma_memory_map & friends;
     * false for long-term device memory mappings such as option ROMs
     * and video RAM.  Only dma mappings are a problem when Xen asks
     * QEMU to destroy all mappings (e.g. after ballooning), so
     * xen_invalidate_map_cache() warns about them and ignores the rest.
     */
    bool dma;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;

static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}
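
/*
 * Return 1 if all bits in [nr, nr + size) of the bitmap are set,
 * i.e. if every page of the requested range is validly mapped.
 */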
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}
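
/*
 * Size the cache from RLIMIT_AS: the mapcache may consume at most
 * MCACHE_MAX_SIZE of virtual address space, minus the headroom QEMU
 * itself needs (NON_MCACHE_MEMORY_SIZE).  nr_buckets is the number of
 * MCACHE_BUCKET_SIZE buckets required to cover max_mcache_size.
 */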
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}
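
/*
 * (Re)build the mapping for one cache entry: tear down any previous
 * mapping, then map the bucket's guest frames with a single
 * xenforeignmemory_map() call.  Frames that fail to map become holes in
 * entry->valid_mapping instead of failing the whole bucket.
 */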
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        ram_block_notify_remove(entry->vaddr_base, entry->size);
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("munmap failed");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                      nb_pfn, pfns, err);
    if (vaddr_base == NULL) {
        perror("xenforeignmemory_map");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    ram_block_notify_add(entry->vaddr_base, entry->size);
    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}
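
/*
 * Translate a guest physical address to a pointer into a mapped bucket.
 * Fast path: the last entry used, if no lock and no explicit size are
 * requested.  Otherwise walk the bucket's hash chain, remapping an
 * unlocked entry (or appending a fresh one) on a miss.  With "lock" set,
 * a MapCacheRev is queued so the mapping can later be looked up and
 * released by virtual address.
 */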
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock, bool dma)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated = false;

tryagain:
    address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                        test_bit_size >> XC_PAGE_SHIFT,
                        entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, cache_size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                           test_bit_size >> XC_PAGE_SHIFT,
                           entry->valid_mapping)) {
            xen_remap_bucket(entry, cache_size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->dma = dma;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}
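
/*
 * Thread-safe wrapper around xen_map_cache_unlocked().  A sketch of a
 * hypothetical caller, for illustration only (the in-tree callers are
 * qemu_map_ram_ptr() and address_space_map() via qemu_ram_ptr_length()):
 *
 *     uint8_t *p = xen_map_cache(gpa, len, 1, true);    // locked DMA map
 *     if (p) {
 *         memcpy(p, buf, len);                          // use the mapping
 *         xen_invalidate_map_cache_entry(p);            // drop the lock
 *     }
 */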
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock, bool dma)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
    mapcache_unlock();
    return p;
}
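
/*
 * Reverse lookup: given a pointer returned by a locked xen_map_cache()
 * call, recover the guest ram_addr_t.  Aborts if the pointer is not a
 * known locked mapping.
 */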
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}
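
/*
 * Drop one reference to a locked mapping.  The entry is only unmapped
 * and freed once its lock count drops to zero, and never if it is the
 * first entry of its bucket (the bucket array itself must survive).
 */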
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    ram_block_notify_remove(entry->vaddr_base, entry->size);
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("munmap failed");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}

void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}
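
/*
 * Called when Xen asks QEMU to drop all mappings, e.g. after ballooning:
 * flush pending block I/O, warn about any still-locked DMA mappings,
 * then unmap every unlocked bucket.
 */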
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (!reventry->dma) {
            continue;
        }
        fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
                " "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("munmap failed");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}