mirror of https://github.com/xemu-project/xemu.git
accel/tcg: Move PageDesc tree into tb-maint.c for system
Now that PageDesc is not used for user-only, and for system it is only used for tb maintenance, move the implementation into tb-maint.c, appropriately ifdefed. We have not yet eliminated all references to PageDesc for user-only, so retain a typedef to the structure without definition.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 67ff2186b0
commit babcbc220b
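The heart of the header change below is a standard opaque-type split: the typedef stays visible to every configuration, while the struct body is defined only for the system build. A minimal sketch of the pattern, using the names from the patch (QemuSpin and the config macro come from QEMU's own headers):

/* Visible to both user-only and system builds. */
typedef struct PageDesc PageDesc;

#ifndef CONFIG_USER_ONLY
/* System build only: the full definition, usable by tb-maint.c. */
struct PageDesc {
    QemuSpin lock;       /* protects the list below */
    uintptr_t first_tb;  /* tagged list of TBs intersecting this ram page */
};
#endif

User-only code may still declare and pass PageDesc pointers (for example the page_lock_pair stub later in this patch), but any attempt to dereference one no longer compiles, which is the property the commit message relies on.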
accel/tcg/internal.h
@@ -23,51 +23,13 @@
 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
 #endif
 
-typedef struct PageDesc {
+typedef struct PageDesc PageDesc;
+
 #ifndef CONFIG_USER_ONLY
+
+struct PageDesc {
     QemuSpin lock;
     /* list of TBs intersecting this ram page */
     uintptr_t first_tb;
-#endif
-} PageDesc;
-
-/*
- * In system mode we want L1_MAP to be based on ram offsets,
- * while in user mode we want it to be based on virtual addresses.
- *
- * TODO: For user mode, see the caveat re host vs guest virtual
- * address spaces near GUEST_ADDR_MAX.
- */
-#if !defined(CONFIG_USER_ONLY)
-#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
-# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
-#else
-# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
-#endif
-#else
-# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
-#endif
-
-/* Size of the L2 (and L3, etc) page tables. */
-#define V_L2_BITS 10
-#define V_L2_SIZE (1 << V_L2_BITS)
-
-/*
- * L1 Mapping properties
- */
-extern int v_l1_size;
-extern int v_l1_shift;
-extern int v_l2_levels;
-
-/*
- * The bottom level has pointers to PageDesc, and is indexed by
- * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
- */
-#define V_L1_MIN_BITS 4
-#define V_L1_MAX_BITS (V_L2_BITS + 3)
-#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
-
-extern void *l1_map[V_L1_MAX_SIZE];
+};
 
 PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);

@@ -76,6 +38,11 @@ static inline PageDesc *page_find(tb_page_addr_t index)
     return page_find_alloc(index, false);
 }
 
+void page_table_config_init(void);
+#else
+static inline void page_table_config_init(void) { }
+#endif
+
 /* list iterators for lists of tagged pointers in TranslationBlock */
 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
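For context, a hedged sketch of how a system-mode caller consumes this header interface; the lookup and locking shape matches the patch, but the caller itself and the walk over first_tb are hypothetical and elided:

/* Hypothetical lookup, for illustration only: find (never allocate)
 * the descriptor covering one guest ram page. */
PageDesc *pd = page_find(addr >> TARGET_PAGE_BITS);
if (pd != NULL) {
    qemu_spin_lock(&pd->lock);
    /* ... walk the tagged TB list rooted at pd->first_tb ... */
    qemu_spin_unlock(&pd->lock);
}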
accel/tcg/tb-maint.c
@@ -127,6 +127,111 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
 }
 
 #else
+/*
+ * In system mode we want L1_MAP to be based on ram offsets.
+ */
+#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
+# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
+#else
+# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+
+/* Size of the L2 (and L3, etc) page tables. */
+#define V_L2_BITS 10
+#define V_L2_SIZE (1 << V_L2_BITS)
+
+/*
+ * L1 Mapping properties
+ */
+static int v_l1_size;
+static int v_l1_shift;
+static int v_l2_levels;
+
+/*
+ * The bottom level has pointers to PageDesc, and is indexed by
+ * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
+ */
+#define V_L1_MIN_BITS 4
+#define V_L1_MAX_BITS (V_L2_BITS + 3)
+#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
+
+static void *l1_map[V_L1_MAX_SIZE];
+
+void page_table_config_init(void)
+{
+    uint32_t v_l1_bits;
+
+    assert(TARGET_PAGE_BITS);
+    /* The bits remaining after N lower levels of page tables. */
+    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
+    if (v_l1_bits < V_L1_MIN_BITS) {
+        v_l1_bits += V_L2_BITS;
+    }
+
+    v_l1_size = 1 << v_l1_bits;
+    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
+    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
+
+    assert(v_l1_bits <= V_L1_MAX_BITS);
+    assert(v_l1_shift % V_L2_BITS == 0);
+    assert(v_l2_levels >= 0);
+}
+
+PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
+{
+    PageDesc *pd;
+    void **lp;
+    int i;
+
+    /* Level 1. Always allocated. */
+    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
+
+    /* Level 2..N-1. */
+    for (i = v_l2_levels; i > 0; i--) {
+        void **p = qatomic_rcu_read(lp);
+
+        if (p == NULL) {
+            void *existing;
+
+            if (!alloc) {
+                return NULL;
+            }
+            p = g_new0(void *, V_L2_SIZE);
+            existing = qatomic_cmpxchg(lp, NULL, p);
+            if (unlikely(existing)) {
+                g_free(p);
+                p = existing;
+            }
+        }
+
+        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
+    }
+
+    pd = qatomic_rcu_read(lp);
+    if (pd == NULL) {
+        void *existing;
+
+        if (!alloc) {
+            return NULL;
+        }
+
+        pd = g_new0(PageDesc, V_L2_SIZE);
+        for (int i = 0; i < V_L2_SIZE; i++) {
+            qemu_spin_init(&pd[i].lock);
+        }
+
+        existing = qatomic_cmpxchg(lp, NULL, pd);
+        if (unlikely(existing)) {
+            for (int i = 0; i < V_L2_SIZE; i++) {
+                qemu_spin_destroy(&pd[i].lock);
+            }
+            g_free(pd);
+            pd = existing;
+        }
+    }
+
+    return pd + (index & (V_L2_SIZE - 1));
+}
+
 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
 static void tb_remove_all_1(int level, void **lp)

@@ -420,6 +525,17 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
     qemu_thread_jit_execute();
 }
 
+#ifdef CONFIG_USER_ONLY
+static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
+                                  PageDesc **ret_p2, tb_page_addr_t phys2,
+                                  bool alloc)
+{
+    *ret_p1 = NULL;
+    *ret_p2 = NULL;
+}
+static inline void page_lock_tb(const TranslationBlock *tb) { }
+static inline void page_unlock_tb(const TranslationBlock *tb) { }
+#else
 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                            PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
 {

@@ -460,10 +576,6 @@ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
     }
 }
 
-#ifdef CONFIG_USER_ONLY
-static inline void page_lock_tb(const TranslationBlock *tb) { }
-static inline void page_unlock_tb(const TranslationBlock *tb) { }
-#else
 /* lock the page(s) of a TB in the correct acquisition order */
 static void page_lock_tb(const TranslationBlock *tb)
 {
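The level arithmetic in page_table_config_init() is easiest to check with concrete numbers. A self-contained sketch, assuming a 64-bit address space and 4 KiB target pages (both values are illustrative; the real ones depend on the host and target configuration):

#include <assert.h>
#include <stdio.h>

/* Example parameters; in QEMU these come from the build configuration. */
#define L1_MAP_ADDR_SPACE_BITS 64   /* e.g. HOST_LONG_BITS */
#define TARGET_PAGE_BITS       12   /* 4 KiB pages */
#define V_L2_BITS              10
#define V_L1_MIN_BITS          4

int main(void)
{
    /* Bits left over after stacking as many 10-bit levels as fit. */
    int v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;     /* keep the top table usefully large */
    }

    int v_l1_size = 1 << v_l1_bits;
    int v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    int v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    /* 64 - 12 = 52 index bits = 12 (L1) + 3 interior levels * 10 + 10 (leaf). */
    printf("l1 bits %d, size %d, shift %d, interior levels %d\n",
           v_l1_bits, v_l1_size, v_l1_shift, v_l2_levels);
    assert(v_l1_bits + (v_l2_levels + 1) * V_L2_BITS ==
           L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS);
    return 0;
}

With these numbers the program prints "l1 bits 12, size 4096, shift 40, interior levels 3", matching the asserts at the end of the real function.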
accel/tcg/translate-all.c
@@ -114,37 +114,8 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
                   * BITS_PER_BYTE);
 
-/*
- * L1 Mapping properties
- */
-int v_l1_size;
-int v_l1_shift;
-int v_l2_levels;
-
-void *l1_map[V_L1_MAX_SIZE];
-
 TBContext tb_ctx;
 
-static void page_table_config_init(void)
-{
-    uint32_t v_l1_bits;
-
-    assert(TARGET_PAGE_BITS);
-    /* The bits remaining after N lower levels of page tables. */
-    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
-    if (v_l1_bits < V_L1_MIN_BITS) {
-        v_l1_bits += V_L2_BITS;
-    }
-
-    v_l1_size = 1 << v_l1_bits;
-    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
-    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
-
-    assert(v_l1_bits <= V_L1_MAX_BITS);
-    assert(v_l1_shift % V_L2_BITS == 0);
-    assert(v_l2_levels >= 0);
-}
-
 /* Encode VAL as a signed leb128 sequence at P.
    Return P incremented past the encoded value. */
 static uint8_t *encode_sleb128(uint8_t *p, target_long val)

@@ -339,72 +310,6 @@ void page_init(void)
     page_table_config_init();
 }
 
-PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
-{
-    PageDesc *pd;
-    void **lp;
-    int i;
-
-    /* Level 1. Always allocated. */
-    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
-
-    /* Level 2..N-1. */
-    for (i = v_l2_levels; i > 0; i--) {
-        void **p = qatomic_rcu_read(lp);
-
-        if (p == NULL) {
-            void *existing;
-
-            if (!alloc) {
-                return NULL;
-            }
-            p = g_new0(void *, V_L2_SIZE);
-            existing = qatomic_cmpxchg(lp, NULL, p);
-            if (unlikely(existing)) {
-                g_free(p);
-                p = existing;
-            }
-        }
-
-        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
-    }
-
-    pd = qatomic_rcu_read(lp);
-    if (pd == NULL) {
-        void *existing;
-
-        if (!alloc) {
-            return NULL;
-        }
-        pd = g_new0(PageDesc, V_L2_SIZE);
-#ifndef CONFIG_USER_ONLY
-        {
-            int i;
-
-            for (i = 0; i < V_L2_SIZE; i++) {
-                qemu_spin_init(&pd[i].lock);
-            }
-        }
-#endif
-        existing = qatomic_cmpxchg(lp, NULL, pd);
-        if (unlikely(existing)) {
-#ifndef CONFIG_USER_ONLY
-            {
-                int i;
-
-                for (i = 0; i < V_L2_SIZE; i++) {
-                    qemu_spin_destroy(&pd[i].lock);
-                }
-            }
-#endif
-            g_free(pd);
-            pd = existing;
-        }
-    }
-
-    return pd + (index & (V_L2_SIZE - 1));
-}
-
 /* In user-mode page locks aren't used; mmap_lock is enough */
 #ifdef CONFIG_USER_ONLY
 struct page_collection *
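Both copies of page_find_alloc() — the one deleted here and the one added to tb-maint.c above — publish a freshly allocated table level with the same lock-free pattern: allocate tentatively, try to install it with a compare-and-swap against NULL, and discard the local copy if another thread won the race. A stripped-down sketch using C11 atomics in place of QEMU's qatomic helpers (publish_level, make and destroy are hypothetical names, not QEMU API):

#include <stdatomic.h>

/* Install one table at *slot, racing against concurrent allocators;
 * return whichever table ended up installed. */
static void *publish_level(void *_Atomic *slot,
                           void *(*make)(void), void (*destroy)(void *))
{
    void *p = atomic_load_explicit(slot, memory_order_acquire);
    if (p == NULL) {
        void *fresh = make();
        void *expected = NULL;
        /* The analogue of: existing = qatomic_cmpxchg(lp, NULL, fresh); */
        if (atomic_compare_exchange_strong(slot, &expected, fresh)) {
            p = fresh;            /* we won: our table is now visible */
        } else {
            destroy(fresh);       /* we lost: discard ours... */
            p = expected;         /* ...and use the winner's table */
        }
    }
    return p;
}

This is why the tb-maint.c copy must spin-destroy the locks in the losing PageDesc array before freeing it, while the interior void* levels can simply be freed.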