commit f00506aeca

Merge tag 'pull-tcg-20230328' of https://gitlab.com/rth7680/qemu into staging

Use a local version of GTree [#285]
Fix page_set_flags vs the last page of the address space [#1528]
Re-enable gdbstub breakpoints under KVM

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmQjcLIdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8rkgf/ZazodovRKxfaO622
# mGW7ywIm+hIZYmKC7ObiMKFrBoCyeXH9yOLSx42T70QstWvBMukjovLMz1+Ttbo1
# VOvpGH2B5W76l3i+muAlKxFRbBH2kMLTaL+BXtkmkL4FJ9bS8WiPApsL3lEX/q2E
# 3kqaT3N3C09sWO5oVAPGTUHL0EutKhOar2VZL0+PVPFzL3BNPhnQH9QcbNvDBV3n
# cx3GSXZyL7Plyi+qwsKf/3Jo+F2wr2NVf3Dqscu9T1N1kI5hSjRpwqUEJzJZ5rei
# ly/gBXC/J7+WN+x+w2JlN0kWXWqC0QbDfZnj96Pd3owWZ7j4sT9zR5fcNenecxlR
# 38Bo0w==
# =ysF7
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 28 Mar 2023 23:56:50 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20230328' of https://gitlab.com/rth7680/qemu:
  softmmu: Restore use of CPU watchpoint for all accelerators
  softmmu/watchpoint: Add missing 'qemu/error-report.h' include
  softmmu: Restrict cpu_check_watchpoint / address_matches to TCG accel
  linux-user/arm: Take more care allocating commpage
  include/exec: Change reserved_va semantics to last byte
  linux-user: Pass last not end to probe_guest_base
  accel/tcg: Pass last not end to tb_invalidate_phys_range
  accel/tcg: Pass last not end to tb_invalidate_phys_page_range__locked
  accel/tcg: Pass last not end to page_collection_lock
  accel/tcg: Pass last not end to PAGE_FOR_EACH_TB
  accel/tcg: Pass last not end to page_reset_target_data
  accel/tcg: Pass last not end to page_set_flags
  linux-user: Diagnose misaligned -R size
  tcg: use QTree instead of GTree
  util: import GTree as QTree

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
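The theme running through this pull is the switch from exclusive "end" addresses to inclusive "last" addresses, so that a range touching the final page of the guest address space no longer wraps to zero (issue #1528). A minimal standalone sketch of the two conventions follows; the helper names and the 4 KiB page size are illustrative assumptions, not QEMU code:

#include <assert.h>
#include <stdint.h>

#define PAGE_BITS 12
#define PAGE_SIZE ((uint32_t)1 << PAGE_BITS)
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Old convention: exclusive end.  For the final page of a 32-bit
 * address space, start + PAGE_SIZE wraps to 0 and the range is lost. */
static uint32_t page_end(uint32_t addr)
{
    return (addr & PAGE_MASK) + PAGE_SIZE;   /* 0xfffff000 -> 0 (wraps) */
}

/* New convention: inclusive last byte, representable for every page. */
static uint32_t page_last(uint32_t addr)
{
    return addr | ~PAGE_MASK;                /* 0xfffffffe -> 0xffffffff */
}

int main(void)
{
    assert(page_end(0xfffffffeu) == 0);            /* overflowed */
    assert(page_last(0xfffffffeu) == 0xffffffffu); /* well-defined */
    return 0;
}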
@@ -19,6 +19,7 @@
 
 #include "qemu/osdep.h"
 #include "qemu/interval-tree.h"
+#include "qemu/qtree.h"
 #include "exec/cputlb.h"
 #include "exec/log.h"
 #include "exec/exec-all.h"
@@ -126,29 +127,29 @@ static void tb_remove(TranslationBlock *tb)
 }
 
 /* TODO: For now, still shared with translate-all.c for system mode. */
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N)    \
-    for (T = foreach_tb_first(start, end),              \
-         N = foreach_tb_next(T, start, end);            \
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N)   \
+    for (T = foreach_tb_first(start, last),             \
+         N = foreach_tb_next(T, start, last);           \
          T != NULL;                                     \
-         T = N, N = foreach_tb_next(N, start, end))
+         T = N, N = foreach_tb_next(N, start, last))
 
 typedef TranslationBlock *PageForEachNext;
 
 static PageForEachNext foreach_tb_first(tb_page_addr_t start,
-                                        tb_page_addr_t end)
+                                        tb_page_addr_t last)
 {
-    IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);
+    IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);
     return n ? container_of(n, TranslationBlock, itree) : NULL;
 }
 
 static PageForEachNext foreach_tb_next(PageForEachNext tb,
                                        tb_page_addr_t start,
-                                       tb_page_addr_t end)
+                                       tb_page_addr_t last)
 {
     IntervalTreeNode *n;
 
     if (tb) {
-        n = interval_tree_iter_next(&tb->itree, start, end - 1);
+        n = interval_tree_iter_next(&tb->itree, start, last);
         if (n) {
             return container_of(n, TranslationBlock, itree);
         }
@@ -314,12 +315,12 @@ struct page_entry {
  * See also: page_collection_lock().
  */
 struct page_collection {
-    GTree *tree;
+    QTree *tree;
     struct page_entry *max;
 };
 
 typedef int PageForEachNext;
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \
     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
 
 #ifdef CONFIG_DEBUG_TCG
@@ -467,7 +468,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     struct page_entry *pe;
     PageDesc *pd;
 
-    pe = g_tree_lookup(set->tree, &index);
+    pe = q_tree_lookup(set->tree, &index);
     if (pe) {
         return false;
     }
@@ -478,7 +479,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     }
 
     pe = page_entry_new(pd, index);
-    g_tree_insert(set->tree, &pe->index, pe);
+    q_tree_insert(set->tree, &pe->index, pe);
 
     /*
      * If this is either (1) the first insertion or (2) a page whose index
@@ -510,30 +511,30 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 }
 
 /*
- * Lock a range of pages ([@start,@end[) as well as the pages of all
+ * Lock a range of pages ([@start,@last]) as well as the pages of all
  * intersecting TBs.
  * Locking order: acquire locks in ascending order of page index.
 */
 static struct page_collection *page_collection_lock(tb_page_addr_t start,
-                                                    tb_page_addr_t end)
+                                                    tb_page_addr_t last)
 {
     struct page_collection *set = g_malloc(sizeof(*set));
     tb_page_addr_t index;
     PageDesc *pd;
 
     start >>= TARGET_PAGE_BITS;
-    end >>= TARGET_PAGE_BITS;
-    g_assert(start <= end);
+    last >>= TARGET_PAGE_BITS;
+    g_assert(start <= last);
 
-    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
+    set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                 page_entry_destroy);
     set->max = NULL;
     assert_no_pages_locked();
 
 retry:
-    g_tree_foreach(set->tree, page_entry_lock, NULL);
+    q_tree_foreach(set->tree, page_entry_lock, NULL);
 
-    for (index = start; index <= end; index++) {
+    for (index = start; index <= last; index++) {
         TranslationBlock *tb;
         PageForEachNext n;
 
@@ -542,7 +543,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
             continue;
         }
         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
-            g_tree_foreach(set->tree, page_entry_unlock, NULL);
+            q_tree_foreach(set->tree, page_entry_unlock, NULL);
             goto retry;
         }
         assert_page_locked(pd);
@@ -551,7 +552,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
             (tb_page_addr1(tb) != -1 &&
              page_trylock_add(set, tb_page_addr1(tb)))) {
             /* drop all locks, and reacquire in order */
-            g_tree_foreach(set->tree, page_entry_unlock, NULL);
+            q_tree_foreach(set->tree, page_entry_unlock, NULL);
             goto retry;
         }
     }
@@ -562,7 +563,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
 static void page_collection_unlock(struct page_collection *set)
 {
     /* entries are unlocked and freed via page_entry_destroy */
-    g_tree_destroy(set->tree);
+    q_tree_destroy(set->tree);
     g_free(set);
 }
 
@@ -990,14 +991,14 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
 * Called with mmap_lock held for user-mode emulation.
 * NOTE: this function must not be called while a TB is running.
 */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     TranslationBlock *tb;
     PageForEachNext n;
 
     assert_memory_lock();
 
-    PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
+    PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
         tb_phys_invalidate__locked(tb);
     }
 }
@@ -1009,11 +1010,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 */
 void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
-    tb_page_addr_t start, end;
+    tb_page_addr_t start, last;
 
     start = addr & TARGET_PAGE_MASK;
-    end = start + TARGET_PAGE_SIZE;
-    tb_invalidate_phys_range(start, end);
+    last = addr | ~TARGET_PAGE_MASK;
+    tb_invalidate_phys_range(start, last);
 }
 
 /*
@@ -1029,6 +1030,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     bool current_tb_modified;
     TranslationBlock *tb;
     PageForEachNext n;
+    tb_page_addr_t last;
 
     /*
      * Without precise smc semantics, or when outside of a TB,
@@ -1045,10 +1047,11 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     assert_memory_lock();
     current_tb = tcg_tb_lookup(pc);
 
+    last = addr | ~TARGET_PAGE_MASK;
     addr &= TARGET_PAGE_MASK;
     current_tb_modified = false;
 
-    PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
+    PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
         if (current_tb == tb &&
             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
             /*
@@ -1080,11 +1083,10 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
 static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
-                                      tb_page_addr_t end,
+                                      tb_page_addr_t last,
                                       uintptr_t retaddr)
 {
     TranslationBlock *tb;
-    tb_page_addr_t tb_start, tb_end;
     PageForEachNext n;
 #ifdef TARGET_HAS_PRECISE_SMC
     bool current_tb_modified = false;
@@ -1092,22 +1094,22 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #endif /* TARGET_HAS_PRECISE_SMC */
 
     /*
-     * We remove all the TBs in the range [start, end[.
+     * We remove all the TBs in the range [start, last].
      * XXX: see if in some cases it could be faster to invalidate all the code
      */
-    PAGE_FOR_EACH_TB(start, end, p, tb, n) {
+    PAGE_FOR_EACH_TB(start, last, p, tb, n) {
+        tb_page_addr_t tb_start, tb_last;
+
         /* NOTE: this is subtle as a TB may span two physical pages */
+        tb_start = tb_page_addr0(tb);
+        tb_last = tb_start + tb->size - 1;
         if (n == 0) {
-            /* NOTE: tb_end may be after the end of the page, but
-               it is not a problem */
-            tb_start = tb_page_addr0(tb);
-            tb_end = tb_start + tb->size;
+            tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
         } else {
             tb_start = tb_page_addr1(tb);
-            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
-                                 & ~TARGET_PAGE_MASK);
+            tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
         }
-        if (!(tb_end <= start || tb_start >= end)) {
+        if (!(tb_last < start || tb_start > last)) {
 #ifdef TARGET_HAS_PRECISE_SMC
             if (current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
@@ -1149,7 +1151,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
     struct page_collection *pages;
-    tb_page_addr_t start, end;
+    tb_page_addr_t start, last;
     PageDesc *p;
 
     p = page_find(addr >> TARGET_PAGE_BITS);
@@ -1158,35 +1160,37 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
     }
 
     start = addr & TARGET_PAGE_MASK;
-    end = start + TARGET_PAGE_SIZE;
-    pages = page_collection_lock(start, end);
-    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
+    last = addr | ~TARGET_PAGE_MASK;
+    pages = page_collection_lock(start, last);
+    tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
     page_collection_unlock(pages);
 }
 
 /*
 * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * [start;last]. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     struct page_collection *pages;
-    tb_page_addr_t next;
+    tb_page_addr_t index, index_last;
 
-    pages = page_collection_lock(start, end);
-    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-         start < end;
-         start = next, next += TARGET_PAGE_SIZE) {
-        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
-        tb_page_addr_t bound = MIN(next, end);
+    pages = page_collection_lock(start, last);
+
+    index_last = last >> TARGET_PAGE_BITS;
+    for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
+        PageDesc *pd = page_find(index);
+        tb_page_addr_t bound;
 
         if (pd == NULL) {
             continue;
         }
         assert_page_locked(pd);
+        bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
+        bound = MIN(bound, last);
         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
     }
     page_collection_unlock(pages);
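For reference, the shape of the rewritten loop above — walk the page indices of the inclusive range [start, last] and clamp each per-page bound to last — can be exercised in isolation. A standalone sketch; process() is a placeholder, not a QEMU function:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12
#define PAGE_OFF  (((uint64_t)1 << PAGE_BITS) - 1)

/* Placeholder for the per-page work, e.g. invalidating TBs. */
static void process(uint64_t start, uint64_t bound)
{
    printf("[0x%" PRIx64 ", 0x%" PRIx64 "]\n", start, bound);
}

/* Visit every page of the inclusive range [start, last]; the bound for
 * each page is its own last byte, clamped to the range's last byte. */
static void for_each_page(uint64_t start, uint64_t last)
{
    uint64_t index_last = last >> PAGE_BITS;

    for (uint64_t index = start >> PAGE_BITS; index <= index_last; index++) {
        uint64_t bound = (index << PAGE_BITS) | PAGE_OFF;
        process(start, bound < last ? bound : last);
    }
}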
@@ -1207,7 +1211,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
     }
 
     assert_page_locked(p);
-    tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
+    tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
 }
 
 /*
@@ -1221,7 +1225,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
 {
     struct page_collection *pages;
 
-    pages = page_collection_lock(ram_addr, ram_addr + size);
+    pages = page_collection_lock(ram_addr, ram_addr + size - 1);
     tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
     page_collection_unlock(pages);
 }
@@ -572,7 +572,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
         addr = get_page_addr_code(env, pc);
         if (addr != -1) {
-            tb_invalidate_phys_range(addr, addr + 1);
+            tb_invalidate_phys_range(addr, addr);
         }
     }
 }
@@ -480,24 +480,22 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE. The mmap_lock should already be held.
 */
-void page_set_flags(target_ulong start, target_ulong end, int flags)
+void page_set_flags(target_ulong start, target_ulong last, int flags)
 {
-    target_ulong last;
     bool reset = false;
     bool inval_tb = false;
 
     /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
-    assert(start < end);
-    assert(end - 1 <= GUEST_ADDR_MAX);
+    assert(start <= last);
+    assert(last <= GUEST_ADDR_MAX);
     /* Only set PAGE_ANON with new mappings. */
     assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
     assert_memory_lock();
 
-    start = start & TARGET_PAGE_MASK;
-    end = TARGET_PAGE_ALIGN(end);
-    last = end - 1;
+    start &= TARGET_PAGE_MASK;
+    last |= ~TARGET_PAGE_MASK;
 
     if (!(flags & PAGE_VALID)) {
         flags = 0;
@@ -510,7 +508,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
     }
 
     if (!flags || reset) {
-        page_reset_target_data(start, end);
+        page_reset_target_data(start, last);
         inval_tb |= pageflags_unset(start, last);
     }
     if (flags) {
@@ -518,7 +516,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
                                         ~(reset ? 0 : PAGE_STICKY));
     }
     if (inval_tb) {
-        tb_invalidate_phys_range(start, end);
+        tb_invalidate_phys_range(start, last);
     }
 }
 
@@ -816,15 +814,14 @@ typedef struct TargetPageDataNode {
 
 static IntervalTreeRoot targetdata_root;
 
-void page_reset_target_data(target_ulong start, target_ulong end)
+void page_reset_target_data(target_ulong start, target_ulong last)
 {
     IntervalTreeNode *n, *next;
-    target_ulong last;
 
     assert_memory_lock();
 
-    start = start & TARGET_PAGE_MASK;
-    last = TARGET_PAGE_ALIGN(end) - 1;
+    start &= TARGET_PAGE_MASK;
+    last |= ~TARGET_PAGE_MASK;
 
     for (n = interval_tree_iter_first(&targetdata_root, start, last),
          next = n ? interval_tree_iter_next(n, start, last) : NULL;
@@ -887,7 +884,7 @@ void *page_get_target_data(target_ulong address)
     return t->data[(page - region) >> TARGET_PAGE_BITS];
 }
 #else
-void page_reset_target_data(target_ulong start, target_ulong end) { }
+void page_reset_target_data(target_ulong start, target_ulong last) { }
 #endif /* TARGET_PAGE_DATA_SIZE */
 
 /* The softmmu versions of these helpers are in cputlb.c. */
@@ -68,13 +68,9 @@ bool have_guest_base;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 #  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
       (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/*
- * There are a number of places where we assign reserved_va to a variable
- * of type abi_ulong and expect it to fit. Avoid the last page.
- */
-#   define MAX_RESERVED_VA  (0xfffffffful & TARGET_PAGE_MASK)
+#   define MAX_RESERVED_VA  0xfffffffful
 #  else
-#   define MAX_RESERVED_VA  (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+#   define MAX_RESERVED_VA  ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 #  endif
 # else
 #  define MAX_RESERVED_VA  0
@@ -466,7 +462,7 @@ int main(int argc, char **argv)
     envlist_free(envlist);
 
     if (reserved_va) {
-        mmap_next_start = reserved_va;
+        mmap_next_start = reserved_va + 1;
     }
 
     {
@@ -118,7 +118,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
         if (ret != 0)
             goto error;
     }
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
     mmap_unlock();
     return 0;
 error:
@@ -234,7 +234,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
     size = HOST_PAGE_ALIGN(size) + alignment;
     end_addr = start + size;
     if (end_addr > reserved_va) {
-        end_addr = reserved_va;
+        end_addr = reserved_va + 1;
     }
     addr = end_addr - qemu_host_page_size;
 
@@ -243,7 +243,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
             if (looped) {
                 return (abi_ulong)-1;
             }
-            end_addr = reserved_va;
+            end_addr = reserved_va + 1;
             addr = end_addr - qemu_host_page_size;
             looped = 1;
             continue;
@@ -656,7 +656,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         }
     }
  the_end1:
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
  the_end:
 #ifdef DEBUG_MMAP
     printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
@@ -767,7 +767,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
     }
 
     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
@@ -231,6 +231,7 @@ safe_stack=""
 use_containers="yes"
 gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb")
 gdb_arches=""
+glib_has_gslice="no"
 
 if test -e "$source_path/.git"
 then
@@ -1494,6 +1495,17 @@ for i in $glib_modules; do
     fi
 done
 
+# Check whether glib has gslice, which we have to avoid for correctness.
+# TODO: remove this check and the corresponding workaround (qtree) when
+# the minimum supported glib is >= $glib_dropped_gslice_version.
+glib_dropped_gslice_version=2.75.3
+for i in $glib_modules; do
+    if ! $pkg_config --atleast-version=$glib_dropped_gslice_version $i; then
+        glib_has_gslice="yes"
+        break
+    fi
+done
+
 glib_bindir="$($pkg_config --variable=bindir glib-2.0)"
 if test -z "$glib_bindir" ; then
         glib_bindir="$($pkg_config --variable=prefix glib-2.0)"/bin
@@ -2420,6 +2432,9 @@ echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
 echo "GLIB_LIBS=$glib_libs" >> $config_host_mak
 echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak
 echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak
+if test "$glib_has_gslice" = "yes" ; then
+    echo "HAVE_GLIB_WITH_SLICE_ALLOCATOR=y" >> $config_host_mak
+fi
 echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
 echo "EXESUF=$EXESUF" >> $config_host_mak
@@ -152,6 +152,15 @@ static inline void tswap64s(uint64_t *s)
 */
 extern uintptr_t guest_base;
 extern bool have_guest_base;
+
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default.  The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
+ */
 extern unsigned long reserved_va;
 
 /*
@@ -171,7 +180,7 @@ extern unsigned long reserved_va;
 #define GUEST_ADDR_MAX_ \
     ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
      UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
 
 #else
 
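The new GUEST_ADDR_MAX relies on the GNU C conditional with an omitted middle operand: `x ? : y` means `x ? x : y`, with `x` evaluated once. Since reserved_va now already holds the last guest byte, no `- 1` adjustment remains. A hedged restatement without the extension (helper name is illustrative):

/* Equivalent spelling of the new macro, without the GNU extension. */
static inline unsigned long guest_addr_max(unsigned long reserved_va,
                                           unsigned long guest_addr_max_)
{
    return reserved_va != 0 ? reserved_va : guest_addr_max_;
}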
@@ -276,8 +285,8 @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
 int walk_memory_regions(void *, walk_memory_regions_fn);
 
 int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
-void page_reset_target_data(target_ulong start, target_ulong end);
+void page_set_flags(target_ulong start, target_ulong last, int flags);
+void page_reset_target_data(target_ulong start, target_ulong last);
 int page_check_range(target_ulong start, target_ulong len, int flags);
 
 /**
@@ -678,7 +678,7 @@ void tb_invalidate_phys_addr(target_ulong addr);
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
 #endif
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
 /* GETPC is the true target of the return instruction that we'll execute. */
@@ -949,7 +949,7 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
     return false;
 }
 
-#if !defined(CONFIG_TCG) || defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_USER_ONLY)
 static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                                         int flags, CPUWatchpoint **watchpoint)
 {
@@ -970,17 +970,6 @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
 static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
 {
 }
 
-static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
-                                        MemTxAttrs atr, int fl, uintptr_t ra)
-{
-}
-
-static inline int cpu_watchpoint_address_matches(CPUState *cpu,
-                                                 vaddr addr, vaddr len)
-{
-    return 0;
-}
-
 #else
 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                           int flags, CPUWatchpoint **watchpoint);
@@ -988,32 +977,6 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                           vaddr len, int flags);
 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
 void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
-
-/**
- * cpu_check_watchpoint:
- * @cpu: cpu context
- * @addr: guest virtual address
- * @len: access length
- * @attrs: memory access attributes
- * @flags: watchpoint access type
- * @ra: unwind return address
- *
- * Check for a watchpoint hit in [addr, addr+len) of the type
- * specified by @flags.  Exit via exception with a hit.
- */
-void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
-                          MemTxAttrs attrs, int flags, uintptr_t ra);
-
-/**
- * cpu_watchpoint_address_matches:
- * @cpu: cpu context
- * @addr: guest virtual address
- * @len: access length
- *
- * Return the watchpoint flags that apply to [addr, addr+len).
- * If no watchpoint is registered for the range, the result is 0.
- */
-int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
 #endif
 
 /**
@@ -175,4 +175,47 @@ struct TCGCPUOps {
 
 };
 
+#if defined(CONFIG_USER_ONLY)
+
+static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+                                        MemTxAttrs atr, int fl, uintptr_t ra)
+{
+}
+
+static inline int cpu_watchpoint_address_matches(CPUState *cpu,
+                                                 vaddr addr, vaddr len)
+{
+    return 0;
+}
+
+#else
+
+/**
+ * cpu_check_watchpoint:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ * @attrs: memory access attributes
+ * @flags: watchpoint access type
+ * @ra: unwind return address
+ *
+ * Check for a watchpoint hit in [addr, addr+len) of the type
+ * specified by @flags.  Exit via exception with a hit.
+ */
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+                          MemTxAttrs attrs, int flags, uintptr_t ra);
+
+/**
+ * cpu_watchpoint_address_matches:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ *
+ * Return the watchpoint flags that apply to [addr, addr+len).
+ * If no watchpoint is registered for the range, the result is 0.
+ */
+int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
+
+#endif
+
 #endif /* TCG_CPU_OPS_H */
@@ -0,0 +1,201 @@
/*
 * GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000.  See the AUTHORS
 * file for a list of people on the GLib Team.  See the ChangeLog
 * files for a list of changes.  These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

/*
 * QTree is a partial import of Glib's GTree. The parts excluded correspond
 * to API calls either deprecated (e.g. g_tree_traverse) or recently added
 * (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU.
 *
 * The reason for this import is to allow us to control the memory allocator
 * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's
 * slice allocator, which causes problems when forking in user-mode;
 * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's
 * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead".
 *
 * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
 */

#ifndef QEMU_QTREE_H
#define QEMU_QTREE_H

#include "qemu/osdep.h"

#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR

typedef struct _QTree QTree;

typedef struct _QTreeNode QTreeNode;

typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node,
                                      gpointer user_data);

/*
 * Balanced binary trees
 */
QTree *q_tree_new(GCompareFunc key_compare_func);
QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
                            gpointer key_compare_data);
QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
                       gpointer key_compare_data,
                       GDestroyNotify key_destroy_func,
                       GDestroyNotify value_destroy_func);
QTree *q_tree_ref(QTree *tree);
void q_tree_unref(QTree *tree);
void q_tree_destroy(QTree *tree);
void q_tree_insert(QTree *tree,
                   gpointer key,
                   gpointer value);
void q_tree_replace(QTree *tree,
                    gpointer key,
                    gpointer value);
gboolean q_tree_remove(QTree *tree,
                       gconstpointer key);
gboolean q_tree_steal(QTree *tree,
                      gconstpointer key);
gpointer q_tree_lookup(QTree *tree,
                       gconstpointer key);
gboolean q_tree_lookup_extended(QTree *tree,
                                gconstpointer lookup_key,
                                gpointer *orig_key,
                                gpointer *value);
void q_tree_foreach(QTree *tree,
                    GTraverseFunc func,
                    gpointer user_data);
gpointer q_tree_search(QTree *tree,
                       GCompareFunc search_func,
                       gconstpointer user_data);
gint q_tree_height(QTree *tree);
gint q_tree_nnodes(QTree *tree);

#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */

typedef GTree QTree;
typedef GTreeNode QTreeNode;
typedef GTraverseNodeFunc QTraverseNodeFunc;

static inline QTree *q_tree_new(GCompareFunc key_compare_func)
{
    return g_tree_new(key_compare_func);
}

static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
                                          gpointer key_compare_data)
{
    return g_tree_new_with_data(key_compare_func, key_compare_data);
}

static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
                                     gpointer key_compare_data,
                                     GDestroyNotify key_destroy_func,
                                     GDestroyNotify value_destroy_func)
{
    return g_tree_new_full(key_compare_func, key_compare_data,
                           key_destroy_func, value_destroy_func);
}

static inline QTree *q_tree_ref(QTree *tree)
{
    return g_tree_ref(tree);
}

static inline void q_tree_unref(QTree *tree)
{
    g_tree_unref(tree);
}

static inline void q_tree_destroy(QTree *tree)
{
    g_tree_destroy(tree);
}

static inline void q_tree_insert(QTree *tree,
                                 gpointer key,
                                 gpointer value)
{
    g_tree_insert(tree, key, value);
}

static inline void q_tree_replace(QTree *tree,
                                  gpointer key,
                                  gpointer value)
{
    g_tree_replace(tree, key, value);
}

static inline gboolean q_tree_remove(QTree *tree,
                                     gconstpointer key)
{
    return g_tree_remove(tree, key);
}

static inline gboolean q_tree_steal(QTree *tree,
                                    gconstpointer key)
{
    return g_tree_steal(tree, key);
}

static inline gpointer q_tree_lookup(QTree *tree,
                                     gconstpointer key)
{
    return g_tree_lookup(tree, key);
}

static inline gboolean q_tree_lookup_extended(QTree *tree,
                                              gconstpointer lookup_key,
                                              gpointer *orig_key,
                                              gpointer *value)
{
    return g_tree_lookup_extended(tree, lookup_key, orig_key, value);
}

static inline void q_tree_foreach(QTree *tree,
                                  GTraverseFunc func,
                                  gpointer user_data)
{
    return g_tree_foreach(tree, func, user_data);
}

static inline gpointer q_tree_search(QTree *tree,
                                     GCompareFunc search_func,
                                     gconstpointer user_data)
{
    return g_tree_search(tree, search_func, user_data);
}

static inline gint q_tree_height(QTree *tree)
{
    return g_tree_height(tree);
}

static inline gint q_tree_nnodes(QTree *tree)
{
    return g_tree_nnodes(tree);
}

#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */

#endif /* QEMU_QTREE_H */
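A small usage sketch of the imported API, mirroring how the page_collection code above builds its tree — illustrative only, with a made-up key type and comparison function:

#include "qemu/osdep.h"
#include "qemu/qtree.h"

static gint cmp_ulong(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    const unsigned long *a = ap, *b = bp;
    return *a < *b ? -1 : (*a > *b ? 1 : 0);
}

static void qtree_example(void)
{
    /* Same constructor shape as page_collection_lock() uses. */
    QTree *tree = q_tree_new_full(cmp_ulong, NULL, NULL, g_free);
    unsigned long *key = g_new(unsigned long, 1);

    *key = 42;
    q_tree_insert(tree, key, key);           /* key doubles as the value */
    g_assert(q_tree_lookup(tree, key) == key);
    q_tree_destroy(tree);                    /* value freed via g_free */
}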
@@ -30,7 +30,7 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
         * the high addresses.  Restrict linux-user to the
         * cached write-back RAM in the system map.
         */
-        return 0x80000000ul;
+        return 0x7ffffffful;
     } else {
         /*
         * We need to be able to map the commpage.
@@ -208,12 +208,12 @@ static bool init_guest_commpage(void)
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
-        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
-                   TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
+                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
@@ -423,12 +423,32 @@ enum {
 
 static bool init_guest_commpage(void)
 {
-    abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
-    void *want = g2h_untagged(commpage);
-    void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
-                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+    ARMCPU *cpu = ARM_CPU(thread_cpu);
+    abi_ptr want = HI_COMMPAGE & TARGET_PAGE_MASK;
+    abi_ptr addr;
 
-    if (addr == MAP_FAILED) {
+    /*
+     * M-profile allocates maximum of 2GB address space, so can never
+     * allocate the commpage.  Skip it.
+     */
+    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+        return true;
+    }
+
+    /*
+     * If reserved_va does not cover the commpage, we get an assert
+     * in page_set_flags.  Produce an intelligent error instead.
+     */
+    if (reserved_va != 0 && want + TARGET_PAGE_SIZE - 1 > reserved_va) {
+        error_report("Allocating guest commpage: -R 0x%" PRIx64 " too small",
+                     (uint64_t)reserved_va + 1);
+        exit(EXIT_FAILURE);
+    }
+
+    addr = target_mmap(want, TARGET_PAGE_SIZE, PROT_READ | PROT_WRITE,
+                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+
+    if (addr == -1) {
         perror("Allocating guest commpage");
         exit(EXIT_FAILURE);
     }
@@ -437,15 +457,12 @@ static bool init_guest_commpage(void)
     }
 
     /* Set kernel helper versions; rest of page is 0.  */
-    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
+    put_user_u32(5, 0xffff0ffcu);
 
-    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
+    if (target_mprotect(addr, qemu_host_page_size, PROT_READ | PROT_EXEC)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }
-
-    page_set_flags(commpage, commpage + qemu_host_page_size,
-                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}
@@ -1316,7 +1333,7 @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }
 
-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -1728,7 +1745,7 @@ static bool init_guest_commpage(void)
     * and implement syscalls.  Here, simply mark the page executable.
     * Special case the entry points during translation (see do_page_zero).
     */
-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -2209,7 +2226,8 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
 
     /* Ensure that the bss page(s) are valid */
     if ((page_get_flags(last_bss-1) & prot) != prot) {
-        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
+        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
+                       prot | PAGE_VALID);
     }
 
     if (host_start < host_map_start) {
@@ -2511,7 +2529,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
         error_report("%s: requires more virtual address space "
                      "than the host can provide (0x%" PRIx64 ")",
-                     image_name, (uint64_t)guest_hiaddr - guest_base);
+                     image_name, (uint64_t)guest_hiaddr + 1 - guest_base);
         exit(EXIT_FAILURE);
     }
 #endif
@@ -2529,13 +2547,13 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
 
     /* Reserve the address space for the binary, or reserved_va. */
     test = g2h_untagged(guest_loaddr);
-    addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
+    addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0);
     if (test != addr) {
         pgb_fail_in_use(image_name);
     }
     qemu_log_mask(CPU_LOG_PAGE,
-                  "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n",
-                  __func__, addr, guest_hiaddr - guest_loaddr);
+                  "%s: base @ %p for %" PRIu64 " bytes\n",
+                  __func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1);
 }
 
 /**
@@ -2679,7 +2697,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
         if (hiaddr != orig_hiaddr) {
             error_report("%s: requires virtual address space that the "
                          "host cannot provide (0x%" PRIx64 ")",
-                         image_name, (uint64_t)orig_hiaddr);
+                         image_name, (uint64_t)orig_hiaddr + 1);
             exit(EXIT_FAILURE);
         }
 
@@ -2693,7 +2711,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
         * arithmetic wraps around.
         */
        if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
-            hiaddr = (uintptr_t) 4 << 30;
+            hiaddr = UINT32_MAX;
        } else {
            offset = -(HI_COMMPAGE & -align);
        }
@@ -2701,7 +2719,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
         loaddr = MIN(loaddr, LO_COMMPAGE & -align);
     }
 
-    addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
+    addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset);
     if (addr == -1) {
         /*
          * If HI_COMMPAGE, there *might* be a non-consecutive allocation
@@ -2767,17 +2785,17 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     /* Reserve the memory on the host. */
     assert(guest_base != 0);
     test = g2h_untagged(0);
-    addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+    addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
     if (addr == MAP_FAILED || addr != test) {
         error_report("Unable to reserve 0x%lx bytes of virtual address "
                      "space at %p (%s) for use as guest address space (check your "
                      "virtual memory ulimit setting, min_mmap_addr or reserve less "
-                     "using -R option)", reserved_va, test, strerror(errno));
+                     "using -R option)", reserved_va + 1, test, strerror(errno));
         exit(EXIT_FAILURE);
     }
 
     qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
-                  __func__, addr, reserved_va);
+                  __func__, addr, reserved_va + 1);
 }
 
 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
@@ -3020,7 +3038,7 @@ static void load_elf_image(const char *image_name, int image_fd,
             if (a < loaddr) {
                 loaddr = a;
             }
-            a = eppnt->p_vaddr + eppnt->p_memsz;
+            a = eppnt->p_vaddr + eppnt->p_memsz - 1;
             if (a > hiaddr) {
                 hiaddr = a;
             }
@@ -3111,7 +3129,7 @@ static void load_elf_image(const char *image_name, int image_fd,
     * In both cases, we will overwrite pages in this range with mappings
     * from the executable.
     */
-    load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
+    load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
                             MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
                             (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
                             -1, 0);
@@ -448,7 +448,7 @@ static int load_flat_file(struct linux_binprm * bprm,
     * Allocate the address space.
     */
    probe_guest_base(bprm->filename, 0,
-                     text_len + data_len + extra + indx_len);
+                     text_len + data_len + extra + indx_len - 1);
 
    /*
     * there are a couple of cases here,  the separate code/data
@@ -109,11 +109,9 @@ static const char *last_log_filename;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 #  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
       (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable
-   of type abi_ulong and expect it to fit.  Avoid the last page.  */
-#   define MAX_RESERVED_VA(CPU)  (0xfffffffful & TARGET_PAGE_MASK)
+#   define MAX_RESERVED_VA(CPU)  0xfffffffful
 #  else
-#   define MAX_RESERVED_VA(CPU)  (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+#   define MAX_RESERVED_VA(CPU)  ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 #  endif
 # else
 #  define MAX_RESERVED_VA(CPU)  0
@@ -379,7 +377,9 @@ static void handle_arg_reserved_va(const char *arg)
 {
     char *p;
     int shift = 0;
-    reserved_va = strtoul(arg, &p, 0);
+    unsigned long val;
+
+    val = strtoul(arg, &p, 0);
     switch (*p) {
     case 'k':
     case 'K':
@@ -393,10 +393,10 @@ static void handle_arg_reserved_va(const char *arg)
         break;
     }
     if (shift) {
-        unsigned long unshifted = reserved_va;
+        unsigned long unshifted = val;
         p++;
-        reserved_va <<= shift;
-        if (reserved_va >> shift != unshifted) {
+        val <<= shift;
+        if (val >> shift != unshifted) {
             fprintf(stderr, "Reserved virtual address too big\n");
             exit(EXIT_FAILURE);
         }
@@ -405,6 +405,8 @@ static void handle_arg_reserved_va(const char *arg)
         fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
         exit(EXIT_FAILURE);
     }
+    /* The representation is size - 1, with 0 remaining "default". */
+    reserved_va = val ? val - 1 : 0;
 }
 
 static void handle_arg_singlestep(const char *arg)
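The new representation stores the last valid guest byte rather than the size, which is what lets "-R 4G" fit in a 32-bit unsigned long. A sketch of the arithmetic — the helper name is hypothetical, not the QEMU parser:

/* size - 1 always fits even when the full size (e.g. 1ULL << 32) does
 * not; 0 is kept as "no -R given, use the default". */
static unsigned long reserved_va_from_size(unsigned long long size)
{
    return size ? (unsigned long)(size - 1) : 0;
}
/* reserved_va_from_size(1ULL << 32) == 0xffffffff with a 32-bit long */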
@@ -793,16 +795,19 @@ int main(int argc, char **argv, char **envp)
     */
    max_reserved_va = MAX_RESERVED_VA(cpu);
    if (reserved_va != 0) {
+        if ((reserved_va + 1) % qemu_host_page_size) {
+            char *s = size_to_str(qemu_host_page_size);
+            fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
+            g_free(s);
+            exit(EXIT_FAILURE);
+        }
        if (max_reserved_va && reserved_va > max_reserved_va) {
            fprintf(stderr, "Reserved virtual address too big\n");
            exit(EXIT_FAILURE);
        }
    } else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
-        /*
-         * reserved_va must be aligned with the host page size
-         * as it is used with mmap()
-         */
-        reserved_va = max_reserved_va & qemu_host_page_mask;
+        /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
+        reserved_va = max_reserved_va;
    }
 
    {
@@ -181,7 +181,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
         }
     }
 
-    page_set_flags(start, start + len, page_flags);
+    page_set_flags(start, start + len - 1, page_flags);
     ret = 0;
 
 error:
@@ -283,7 +283,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
     end_addr = start + size;
     if (start > reserved_va - size) {
         /* Start at the top of the address space. */
-        end_addr = ((reserved_va - size) & -align) + size;
+        end_addr = ((reserved_va + 1 - size) & -align) + size;
         looped = true;
     }
 
@@ -297,7 +297,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                 return (abi_ulong)-1;
             }
             /* Re-start at the top of the address space. */
-            addr = end_addr = ((reserved_va - size) & -align) + size;
+            addr = end_addr = ((reserved_va + 1 - size) & -align) + size;
             looped = true;
         } else {
             prot = page_get_flags(addr);
@@ -640,15 +640,15 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
     }
     page_flags |= PAGE_RESET;
     if (passthrough_start == passthrough_end) {
-        page_set_flags(start, start + len, page_flags);
+        page_set_flags(start, start + len - 1, page_flags);
     } else {
         if (start < passthrough_start) {
-            page_set_flags(start, passthrough_start, page_flags);
+            page_set_flags(start, passthrough_start - 1, page_flags);
         }
-        page_set_flags(passthrough_start, passthrough_end,
+        page_set_flags(passthrough_start, passthrough_end - 1,
                        page_flags | PAGE_PASSTHROUGH);
         if (passthrough_end < start + len) {
-            page_set_flags(passthrough_end, start + len, page_flags);
+            page_set_flags(passthrough_end, start + len - 1, page_flags);
         }
     }
 the_end:
@@ -763,7 +763,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
     }
 
     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
@@ -849,8 +849,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
     } else {
         new_addr = h2g(host_addr);
         prot = page_get_flags(old_addr);
-        page_set_flags(old_addr, old_addr + old_size, 0);
-        page_set_flags(new_addr, new_addr + new_size,
+        page_set_flags(old_addr, old_addr + old_size - 1, 0);
+        page_set_flags(new_addr, new_addr + new_size - 1,
                        prot | PAGE_VALID | PAGE_RESET);
     }
     mmap_unlock();
@@ -946,7 +946,7 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
         if (can_passthrough_madvise(start, end)) {
             ret = get_errno(madvise(g2h_untagged(start), len, advice));
             if ((advice == MADV_DONTNEED) && (ret == 0)) {
-                page_reset_target_data(start, start + len);
+                page_reset_target_data(start, start + len - 1);
             }
         }
     }
@@ -4595,7 +4595,7 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
     }
     raddr=h2g((unsigned long)host_raddr);
 
-    page_set_flags(raddr, raddr + shm_info.shm_segsz,
+    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                    PAGE_VALID | PAGE_RESET | PAGE_READ |
                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
 
@@ -4625,7 +4625,7 @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
     for (i = 0; i < N_SHM_REGIONS; ++i) {
         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
             shm_regions[i].in_use = false;
-            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
+            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
             break;
         }
     }
@@ -76,19 +76,19 @@ void fork_end(int child);
 /**
  * probe_guest_base:
  * @image_name: the executable being loaded
- * @loaddr: the lowest fixed address in the executable
- * @hiaddr: the highest fixed address in the executable
+ * @loaddr: the lowest fixed address within the executable
+ * @hiaddr: the highest fixed address within the executable
 *
 * Creates the initial guest address space in the host memory space.
 *
- * If @loaddr == 0, then no address in the executable is fixed,
- * i.e. it is fully relocatable.  In that case @hiaddr is the size
- * of the executable.
+ * If @loaddr == 0, then no address in the executable is fixed, i.e.
+ * it is fully relocatable.  In that case @hiaddr is the size of the
+ * executable minus one.
 *
 * This function will not return if a valid value for guest_base
 * cannot be chosen.  On return, the executable loader can expect
 *
- *    target_mmap(loaddr, hiaddr - loaddr, ...)
+ *    target_mmap(loaddr, hiaddr - loaddr + 1, ...)
 *
 * to succeed.
 */
@@ -508,6 +508,10 @@ glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(),
                          })
# override glib dep with the configure results (for subprojects)
meson.override_dependency('glib-2.0', glib)
+# pass down whether Glib has the slice allocator
+if config_host.has_key('HAVE_GLIB_WITH_SLICE_ALLOCATOR')
+  config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', true)
+endif

gio = not_found
gdbus_codegen = not_found
@@ -5,11 +5,11 @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files(
   'physmem.c',
   'qtest.c',
   'dirtylimit.c',
+  'watchpoint.c',
 )])
 
 specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files(
   'icount.c',
-  'watchpoint.c',
 )])
 
 softmmu_ss.add(files(
@@ -2527,7 +2527,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
     }
     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
         assert(tcg_enabled());
-        tb_invalidate_phys_range(addr, addr + length);
+        tb_invalidate_phys_range(addr, addr + length - 1);
         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
     }
     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
@@ -19,6 +19,7 @@
 
 #include "qemu/osdep.h"
 #include "qemu/main-loop.h"
+#include "qemu/error-report.h"
 #include "exec/exec-all.h"
 #include "exec/translate-all.h"
 #include "sysemu/tcg.h"
@@ -103,6 +104,8 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
     }
 }
 
+#ifdef CONFIG_TCG
+
 /*
  * Return true if this watchpoint address matches the specified
  * access (ie the address range covered by the watchpoint overlaps
@@ -219,3 +222,5 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
         }
     }
 }
+
+#endif /* CONFIG_TCG */
@@ -25,6 +25,7 @@
 #include "exec/ram_addr.h"
 #include "exec/cpu_ldst.h"
 #include "exec/helper-proto.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "qapi/error.h"
 #include "qemu/guest-random.h"
@@ -27,6 +27,7 @@
 #include "tcg/tcg.h"
 #include "vec_internal.h"
 #include "sve_ldst_internal.h"
+#include "hw/core/tcg-cpu-ops.h"
 
 
 /* Return a value for NZCV as per the ARM PredTest pseudofunction.
@@ -26,6 +26,7 @@
 #include "exec/helper-proto.h"
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "qemu/int128.h"
 #include "qemu/atomic128.h"
 #include "trace.h"
tcg/region.c (19 lines changed)
@@ -28,6 +28,7 @@
 #include "qemu/mprotect.h"
 #include "qemu/memalign.h"
 #include "qemu/cacheinfo.h"
+#include "qemu/qtree.h"
 #include "qapi/error.h"
 #include "exec/exec-all.h"
 #include "tcg/tcg.h"
@@ -36,7 +37,7 @@
 
 struct tcg_region_tree {
     QemuMutex lock;
-    GTree *tree;
+    QTree *tree;
     /* padding to avoid false sharing is computed at run-time */
 };
 
@@ -163,7 +164,7 @@ static void tcg_region_trees_init(void)
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
         qemu_mutex_init(&rt->lock);
-        rt->tree = g_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
+        rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
     }
 }
 
@@ -202,7 +203,7 @@ void tcg_tb_insert(TranslationBlock *tb)
 
     g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
-    g_tree_insert(rt->tree, &tb->tc, tb);
+    q_tree_insert(rt->tree, &tb->tc, tb);
     qemu_mutex_unlock(&rt->lock);
 }
 
@@ -212,7 +213,7 @@ void tcg_tb_remove(TranslationBlock *tb)
 
     g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
-    g_tree_remove(rt->tree, &tb->tc);
+    q_tree_remove(rt->tree, &tb->tc);
     qemu_mutex_unlock(&rt->lock);
 }
 
@@ -232,7 +233,7 @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
     }
 
     qemu_mutex_lock(&rt->lock);
-    tb = g_tree_lookup(rt->tree, &s);
+    tb = q_tree_lookup(rt->tree, &s);
     qemu_mutex_unlock(&rt->lock);
     return tb;
 }
@@ -267,7 +268,7 @@ void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
-        g_tree_foreach(rt->tree, func, user_data);
+        q_tree_foreach(rt->tree, func, user_data);
     }
     tcg_region_tree_unlock_all();
 }
@@ -281,7 +282,7 @@ size_t tcg_nb_tbs(void)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
-        nb_tbs += g_tree_nnodes(rt->tree);
+        nb_tbs += q_tree_nnodes(rt->tree);
     }
     tcg_region_tree_unlock_all();
     return nb_tbs;
@@ -296,8 +297,8 @@ static void tcg_region_tree_reset_all(void)
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
         /* Increment the refcount first so that destroy acts as a reset */
-        g_tree_ref(rt->tree);
-        g_tree_destroy(rt->tree);
+        q_tree_ref(rt->tree);
+        q_tree_destroy(rt->tree);
     }
     tcg_region_tree_unlock_all();
 }
@@ -9,6 +9,10 @@ xbzrle_bench = executable('xbzrle-bench',
                          dependencies: [qemuutil,migration])
 endif
 
+qtree_bench = executable('qtree-bench',
+                         sources: 'qtree-bench.c',
+                         dependencies: [qemuutil])
+
 executable('atomic_add-bench',
            sources: files('atomic_add-bench.c'),
            dependencies: [qemuutil],
tests/bench/qtree-bench.c
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+#include "qemu/timer.h"
+
+enum tree_op {
+    OP_LOOKUP,
+    OP_INSERT,
+    OP_REMOVE,
+    OP_REMOVE_ALL,
+    OP_TRAVERSE,
+};
+
+struct benchmark {
+    const char * const name;
+    enum tree_op op;
+    bool fill_on_init;
+};
+
+enum impl_type {
+    IMPL_GTREE,
+    IMPL_QTREE,
+};
+
+struct tree_implementation {
+    const char * const name;
+    enum impl_type type;
+};
+
+static const struct benchmark benchmarks[] = {
+    {
+        .name = "Lookup",
+        .op = OP_LOOKUP,
+        .fill_on_init = true,
+    },
+    {
+        .name = "Insert",
+        .op = OP_INSERT,
+        .fill_on_init = false,
+    },
+    {
+        .name = "Remove",
+        .op = OP_REMOVE,
+        .fill_on_init = true,
+    },
+    {
+        .name = "RemoveAll",
+        .op = OP_REMOVE_ALL,
+        .fill_on_init = true,
+    },
+    {
+        .name = "Traverse",
+        .op = OP_TRAVERSE,
+        .fill_on_init = true,
+    },
+};
+
+static const struct tree_implementation impls[] = {
+    {
+        .name = "GTree",
+        .type = IMPL_GTREE,
+    },
+    {
+        .name = "QTree",
+        .type = IMPL_QTREE,
+    },
+};
+
+static int compare_func(const void *ap, const void *bp)
+{
+    const size_t *a = ap;
+    const size_t *b = bp;
+
+    return *a - *b;
+}
+
+static void init_empty_tree_and_keys(enum impl_type impl,
+                                     void **ret_tree, size_t **ret_keys,
+                                     size_t n_elems)
+{
+    size_t *keys = g_malloc_n(n_elems, sizeof(*keys));
+    for (size_t i = 0; i < n_elems; i++) {
+        keys[i] = i;
+    }
+
+    void *tree;
+    switch (impl) {
+    case IMPL_GTREE:
+        tree = g_tree_new(compare_func);
+        break;
+    case IMPL_QTREE:
+        tree = q_tree_new(compare_func);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    *ret_tree = tree;
+    *ret_keys = keys;
+}
+
+static gboolean traverse_func(gpointer key, gpointer value, gpointer data)
+{
+    return FALSE;
+}
+
+static inline void remove_all(void *tree, enum impl_type impl)
+{
+    switch (impl) {
+    case IMPL_GTREE:
+        g_tree_destroy(tree);
+        break;
+    case IMPL_QTREE:
+        q_tree_destroy(tree);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static int64_t run_benchmark(const struct benchmark *bench,
+                             enum impl_type impl,
+                             size_t n_elems)
+{
+    void *tree;
+    size_t *keys;
+
+    init_empty_tree_and_keys(impl, &tree, &keys, n_elems);
+    if (bench->fill_on_init) {
+        for (size_t i = 0; i < n_elems; i++) {
+            switch (impl) {
+            case IMPL_GTREE:
+                g_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            case IMPL_QTREE:
+                q_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+    }
+
+    int64_t start_ns = get_clock();
+    switch (bench->op) {
+    case OP_LOOKUP:
+        for (size_t i = 0; i < n_elems; i++) {
+            void *value;
+            switch (impl) {
+            case IMPL_GTREE:
+                value = g_tree_lookup(tree, &keys[i]);
+                break;
+            case IMPL_QTREE:
+                value = q_tree_lookup(tree, &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+            (void)value;
+        }
+        break;
+    case OP_INSERT:
+        for (size_t i = 0; i < n_elems; i++) {
+            switch (impl) {
+            case IMPL_GTREE:
+                g_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            case IMPL_QTREE:
+                q_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+        break;
+    case OP_REMOVE:
+        for (size_t i = 0; i < n_elems; i++) {
+            switch (impl) {
+            case IMPL_GTREE:
+                g_tree_remove(tree, &keys[i]);
+                break;
+            case IMPL_QTREE:
+                q_tree_remove(tree, &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+        break;
+    case OP_REMOVE_ALL:
+        remove_all(tree, impl);
+        break;
+    case OP_TRAVERSE:
+        switch (impl) {
+        case IMPL_GTREE:
+            g_tree_foreach(tree, traverse_func, NULL);
+            break;
+        case IMPL_QTREE:
+            q_tree_foreach(tree, traverse_func, NULL);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    int64_t ns = get_clock() - start_ns;
+
+    if (bench->op != OP_REMOVE_ALL) {
+        remove_all(tree, impl);
+    }
+    g_free(keys);
+
+    return ns;
+}
+
+int main(int argc, char *argv[])
+{
+    size_t sizes[] = {
+        32,
+        1024,
+        1024 * 4,
+        1024 * 128,
+        1024 * 1024,
+    };
+
+    double res[ARRAY_SIZE(benchmarks)][ARRAY_SIZE(impls)][ARRAY_SIZE(sizes)];
+    for (int i = 0; i < ARRAY_SIZE(sizes); i++) {
+        size_t size = sizes[i];
+        for (int j = 0; j < ARRAY_SIZE(impls); j++) {
+            const struct tree_implementation *impl = &impls[j];
+            for (int k = 0; k < ARRAY_SIZE(benchmarks); k++) {
+                const struct benchmark *bench = &benchmarks[k];
+
+                /* warm-up run */
+                run_benchmark(bench, impl->type, size);
+
+                int64_t total_ns = 0;
+                int64_t n_runs = 0;
+                while (total_ns < 2e8 || n_runs < 5) {
+                    total_ns += run_benchmark(bench, impl->type, size);
+                    n_runs++;
+                }
+                double ns_per_run = (double)total_ns / n_runs;
+
+                /* Throughput, in Mops/s */
+                res[k][j][i] = size / ns_per_run * 1e3;
+            }
+        }
+    }
+
+    printf("# Results' breakdown: Tree, Op and #Elements. Units: Mops/s\n");
+    printf("%5s %10s ", "Tree", "Op");
+    for (int i = 0; i < ARRAY_SIZE(sizes); i++) {
+        printf("%7zu ", sizes[i]);
+    }
+    printf("\n");
+    char separator[97];
+    for (int i = 0; i < ARRAY_SIZE(separator) - 1; i++) {
+        separator[i] = '-';
+    }
+    separator[ARRAY_SIZE(separator) - 1] = '\0';
+    printf("%s\n", separator);
+    for (int i = 0; i < ARRAY_SIZE(benchmarks); i++) {
+        for (int j = 0; j < ARRAY_SIZE(impls); j++) {
+            printf("%5s %10s ", impls[j].name, benchmarks[i].name);
+            for (int k = 0; k < ARRAY_SIZE(sizes); k++) {
+                printf("%7.2f ", res[i][j][k]);
+                if (j == 0) {
+                    printf("        ");
+                } else {
+                    if (res[i][0][k] != 0) {
+                        double speedup = res[i][j][k] / res[i][0][k];
+                        printf("(%4.2fx) ", speedup);
+                    } else {
+                        printf("(     ) ");
+                    }
+                }
+            }
+            printf("\n");
+        }
+    }
+    printf("%s\n", separator);
+    return 0;
+}
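The one non-obvious line in the bench above is the throughput conversion: each run performs size operations in ns_per_run nanoseconds, so size / ns_per_run is ops per nanosecond (equivalently Gops/s), and scaling by 1e3 gives the Mops/s the result table prints. A tiny standalone check with made-up numbers:

#include <stdio.h>

int main(void)
{
    /* Hypothetical measurement: 1024 ops at 51200 ns per run. */
    double size = 1024.0;
    double ns_per_run = 51200.0;

    /* ops/ns == Gops/s; multiply by 1e3 to express in Mops/s. */
    double mops = size / ns_per_run * 1e3;
    printf("%.2f Mops/s\n", mops);    /* prints 20.00 Mops/s */
    return 0;
}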
tests/unit/meson.build
@@ -36,6 +36,7 @@ tests = {
   'test-rcu-slist': [],
   'test-qdist': [],
   'test-qht': [],
+  'test-qtree': [],
   'test-bitops': [],
   'test-bitcnt': [],
   'test-qgraph': ['../qtest/libqos/qgraph.c'],
tests/unit/test-qtree.c
@@ -0,0 +1,333 @@
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Tests for QTree.
+ * Original source: glib
+ * https://gitlab.gnome.org/GNOME/glib/-/blob/main/glib/tests/tree.c
+ * LGPL license.
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+
+static gint my_compare(gconstpointer a, gconstpointer b)
+{
+    const char *cha = a;
+    const char *chb = b;
+
+    return *cha - *chb;
+}
+
+static gint my_compare_with_data(gconstpointer a,
+                                 gconstpointer b,
+                                 gpointer user_data)
+{
+    const char *cha = a;
+    const char *chb = b;
+
+    /* just check that we got the right data */
+    g_assert(GPOINTER_TO_INT(user_data) == 123);
+
+    return *cha - *chb;
+}
+
+static gint my_search(gconstpointer a, gconstpointer b)
+{
+    return my_compare(b, a);
+}
+
+static gpointer destroyed_key;
+static gpointer destroyed_value;
+static guint destroyed_key_count;
+static guint destroyed_value_count;
+
+static void my_key_destroy(gpointer key)
+{
+    destroyed_key = key;
+    destroyed_key_count++;
+}
+
+static void my_value_destroy(gpointer value)
+{
+    destroyed_value = value;
+    destroyed_value_count++;
+}
+
+static gint my_traverse(gpointer key, gpointer value, gpointer data)
+{
+    char *ch = key;
+
+    g_assert((*ch) > 0);
+
+    if (*ch == 'd') {
+        return TRUE;
+    }
+
+    return FALSE;
+}
+
+char chars[] =
+    "0123456789"
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    "abcdefghijklmnopqrstuvwxyz";
+
+char chars2[] =
+    "0123456789"
+    "abcdefghijklmnopqrstuvwxyz";
+
+static gint check_order(gpointer key, gpointer value, gpointer data)
+{
+    char **p = data;
+    char *ch = key;
+
+    g_assert(**p == *ch);
+
+    (*p)++;
+
+    return FALSE;
+}
+
+static void test_tree_search(void)
+{
+    gint i;
+    QTree *tree;
+    gboolean removed;
+    gchar c;
+    gchar *p, *d;
+
+    tree = q_tree_new_with_data(my_compare_with_data, GINT_TO_POINTER(123));
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+
+    q_tree_foreach(tree, my_traverse, NULL);
+
+    g_assert(q_tree_nnodes(tree) == strlen(chars));
+    g_assert(q_tree_height(tree) == 6);
+
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    for (i = 0; i < 26; i++) {
+        removed = q_tree_remove(tree, &chars[i + 10]);
+        g_assert(removed);
+    }
+
+    c = '\0';
+    removed = q_tree_remove(tree, &c);
+    g_assert(!removed);
+
+    q_tree_foreach(tree, my_traverse, NULL);
+
+    g_assert(q_tree_nnodes(tree) == strlen(chars2));
+    g_assert(q_tree_height(tree) == 6);
+
+    p = chars2;
+    q_tree_foreach(tree, check_order, &p);
+
+    for (i = 25; i >= 0; i--) {
+        q_tree_insert(tree, &chars[i + 10], &chars[i + 10]);
+    }
+
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    c = '0';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+    g_assert(q_tree_lookup_extended(tree, &c, (gpointer *)&d, (gpointer *)&p));
+    g_assert(c == *d && c == *p);
+
+    c = 'A';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+
+    c = 'a';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+
+    c = 'z';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+
+    c = '!';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p == NULL);
+
+    c = '=';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p == NULL);
+
+    c = '|';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p == NULL);
+
+    c = '0';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = 'A';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = 'a';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = 'z';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = '!';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p == NULL);
+
+    c = '=';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p == NULL);
+
+    c = '|';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p == NULL);
+
+    q_tree_destroy(tree);
+}
+
+static void test_tree_remove(void)
+{
+    QTree *tree;
+    char c, d;
+    gint i;
+    gboolean removed;
+
+    tree = q_tree_new_full((GCompareDataFunc)my_compare, NULL,
+                           my_key_destroy,
+                           my_value_destroy);
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+
+    c = '0';
+    q_tree_insert(tree, &c, &c);
+    g_assert(destroyed_key == &c);
+    g_assert(destroyed_value == &chars[0]);
+    destroyed_key = NULL;
+    destroyed_value = NULL;
+
+    d = '1';
+    q_tree_replace(tree, &d, &d);
+    g_assert(destroyed_key == &chars[1]);
+    g_assert(destroyed_value == &chars[1]);
+    destroyed_key = NULL;
+    destroyed_value = NULL;
+
+    c = '2';
+    removed = q_tree_remove(tree, &c);
+    g_assert(removed);
+    g_assert(destroyed_key == &chars[2]);
+    g_assert(destroyed_value == &chars[2]);
+    destroyed_key = NULL;
+    destroyed_value = NULL;
+
+    c = '3';
+    removed = q_tree_steal(tree, &c);
+    g_assert(removed);
+    g_assert(destroyed_key == NULL);
+    g_assert(destroyed_value == NULL);
+
+    const gchar *remove = "omkjigfedba";
+    for (i = 0; remove[i]; i++) {
+        removed = q_tree_remove(tree, &remove[i]);
+        g_assert(removed);
+    }
+
+    q_tree_destroy(tree);
+}
+
+static void test_tree_destroy(void)
+{
+    QTree *tree;
+    gint i;
+
+    tree = q_tree_new(my_compare);
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+
+    g_assert(q_tree_nnodes(tree) == strlen(chars));
+
+    g_test_message("nnodes: %d", q_tree_nnodes(tree));
+    q_tree_ref(tree);
+    q_tree_destroy(tree);
+
+    g_test_message("nnodes: %d", q_tree_nnodes(tree));
+    g_assert(q_tree_nnodes(tree) == 0);
+
+    q_tree_unref(tree);
+}
+
+static void test_tree_insert(void)
+{
+    QTree *tree;
+    gchar *p;
+    gint i;
+    gchar *scrambled;
+
+    tree = q_tree_new(my_compare);
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    q_tree_unref(tree);
+    tree = q_tree_new(my_compare);
+
+    for (i = strlen(chars) - 1; i >= 0; i--) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    q_tree_unref(tree);
+    tree = q_tree_new(my_compare);
+
+    scrambled = g_strdup(chars);
+
+    for (i = 0; i < 30; i++) {
+        gchar tmp;
+        gint a, b;
+
+        a = g_random_int_range(0, strlen(scrambled));
+        b = g_random_int_range(0, strlen(scrambled));
+        tmp = scrambled[a];
+        scrambled[a] = scrambled[b];
+        scrambled[b] = tmp;
+    }
+
+    for (i = 0; scrambled[i]; i++) {
+        q_tree_insert(tree, &scrambled[i], &scrambled[i]);
+    }
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    g_free(scrambled);
+    q_tree_unref(tree);
+}
+
+int main(int argc, char *argv[])
+{
+    g_test_init(&argc, &argv, NULL);
+
+    g_test_add_func("/qtree/search", test_tree_search);
+    g_test_add_func("/qtree/remove", test_tree_remove);
+    g_test_add_func("/qtree/destroy", test_tree_destroy);
+    g_test_add_func("/qtree/insert", test_tree_insert);
+
+    return g_test_run();
+}
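test_tree_destroy() above pins down the same refcount subtlety that tcg_region_tree_reset_all() relies on in tcg/region.c: calling q_tree_destroy() on a tree that still holds an extra reference removes every node but keeps the tree itself alive. A minimal sketch of that reset idiom (the q_tree_reset() helper is hypothetical, assuming the qemu/qtree.h API added by this commit):

#include "qemu/osdep.h"
#include "qemu/qtree.h"

/* Empty the tree in place; the caller's QTree pointer stays valid. */
static void q_tree_reset(QTree *tree)
{
    q_tree_ref(tree);        /* hold a reference across the destroy */
    q_tree_destroy(tree);    /* with refs outstanding, this only drops nodes */
    g_assert(q_tree_nnodes(tree) == 0);
}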
util/meson.build
@@ -26,6 +26,7 @@ util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: winmm)
 util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch)
+util_ss.add(when: 'HAVE_GLIB_WITH_SLICE_ALLOCATOR', if_true: files('qtree.c'))
 util_ss.add(files('envlist.c', 'path.c', 'module.c'))
 util_ss.add(files('host-utils.c'))
 util_ss.add(files('bitmap.c', 'bitops.c'))
File diff suppressed because it is too large