Merge tag 'pull-tcg-20230731' of https://gitlab.com/rth7680/qemu into staging

util/interval-tree: Access left/right/parent atomically
accel/tcg: Clear gen_tb on buffer overflow
bsd-user: Specify host page alignment if none specified
bsd-user: Allocate guest virtual address space
target/ppc: Disable goto_tb with architectural singlestep
target/s390x: Move trans_exc_code update to do_program_interrupt

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmTIIQUdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV87JAf/ZgJTq26oniJ4TLkS
# 2UVBEcxGnnA2L1n4zcXG1o0onT5dAqm/6YjSlVD7C+Ol8pzQMomJKcWLL/jrCEUp
# rQXPV9ibD5bCtO47MY3ZS3aW3pqOhXOeKUFer1+YHWRRyi9Y6kEx0d2No3MSGo18
# S5A6zPwqduQvZPBPVualmtdIrpTasxhUdNfbqBW31pxYpCNg1wqIiwKoLcD5NJeX
# epVhaUi/7TwqljrK7SGXmmfDWiTHIXDtvPrJQcSYGgqpVNFzRuq6jTXRJObeWen0
# DhOHqC0Z6OkZ2gU+eso/VRbcbawQNQohUHQzZ7c0643TxncPDKG82/MDRe2MTJnq
# /z+jpw==
# =Z8UY
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 31 Jul 2023 02:00:53 PM PDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230731' of https://gitlab.com/rth7680/qemu:
  target/s390x: Move trans_exc_code update to do_program_interrupt
  linux-user/armeb: Fix __kernel_cmpxchg() for armeb
  target/ppc: Disable goto_tb with architectural singlestep
  bsd-user: Specify host page alignment if none specified
  bsd-user: Allocate guest virtual address space
  accel/tcg: Clear tcg_ctx->gen_tb on buffer overflow
  util/interval-tree: Use qatomic_read/set for rb_parent_color
  util/interval-tree: Introduce pc_parent
  util/interval-tree: Use qatomic_set_mb in rb_link_node
  util/interval-tree: Use qatomic_read for left/right while searching

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 802341823f
@@ -374,6 +374,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                           "Restarting code generation for "
                           "code_gen_buffer overflow\n");
             tb_unlock_pages(tb);
+            tcg_ctx->gen_tb = NULL;
             goto buffer_overflow;
 
         case -2:
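The single added line drops the TCG context's reference to the half-built translation block before jumping back to the overflow path, so the flush-and-retry that follows cannot leave a stale pointer behind. A toy standalone sketch of that clear-before-retry shape (every name here is invented; this is not QEMU code):

#include <stdio.h>
#include <stddef.h>

/* Toy translation context: buf stands in for the code buffer,
 * gen for the object currently being generated into it. */
struct ctx {
    char buf[8];
    size_t used;
    char *gen;
};

static char *generate(struct ctx *c, size_t len)
{
 retry:
    c->gen = c->buf + c->used;          /* tentatively start a new object */
    if (c->used + len > sizeof(c->buf)) {
        /*
         * Overflow: clear the in-progress pointer *before* flushing and
         * retrying, so nothing can observe a reference into the
         * about-to-be-recycled buffer.
         */
        c->gen = NULL;
        c->used = 0;                    /* "flush" the buffer */
        goto retry;
    }
    c->used += len;
    return c->gen;
}

int main(void)
{
    struct ctx c = { .used = 6 };       /* nearly full: first attempt overflows */
    printf("object at offset %td\n", generate(&c, 4) - c.buf);
    return 0;
}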
@@ -473,10 +473,6 @@ int main(int argc, char **argv)
     target_environ = envlist_to_environ(envlist, NULL);
     envlist_free(envlist);
 
-    if (reserved_va) {
-        mmap_next_start = reserved_va + 1;
-    }
-
     {
         Error *err = NULL;
         if (seed_optarg != NULL) {
@@ -494,7 +490,49 @@ int main(int argc, char **argv)
      * Now that page sizes are configured we can do
      * proper page alignment for guest_base.
      */
-    guest_base = HOST_PAGE_ALIGN(guest_base);
+    if (have_guest_base) {
+        if (guest_base & ~qemu_host_page_mask) {
+            error_report("Selected guest base not host page aligned");
+            exit(1);
+        }
+    }
+
+    /*
+     * If reserving host virtual address space, do so now.
+     * Combined with '-B', ensure that the chosen range is free.
+     */
+    if (reserved_va) {
+        void *p;
+
+        if (have_guest_base) {
+            p = mmap((void *)guest_base, reserved_va + 1, PROT_NONE,
+                     MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_EXCL, -1, 0);
+        } else {
+            p = mmap(NULL, reserved_va + 1, PROT_NONE,
+                     MAP_ANON | MAP_PRIVATE, -1, 0);
+        }
+        if (p == MAP_FAILED) {
+            const char *err = strerror(errno);
+            char *sz = size_to_str(reserved_va + 1);
+
+            if (have_guest_base) {
+                error_report("Cannot allocate %s bytes at -B %p for guest "
+                             "address space: %s", sz, (void *)guest_base, err);
+            } else {
+                error_report("Cannot allocate %s bytes for guest "
+                             "address space: %s", sz, err);
+            }
+            exit(1);
+        }
+        guest_base = (uintptr_t)p;
+        have_guest_base = true;
+
+        /* Ensure that mmap_next_start is within range. */
+        if (reserved_va <= mmap_next_start) {
+            mmap_next_start = (reserved_va / 4 * 3)
+                              & TARGET_PAGE_MASK & qemu_host_page_mask;
+        }
+    }
+
     if (loader_exec(filename, argv + optind, target_environ, regs, info,
                     &bprm) != 0) {
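The hunk above (bsd-user's main(), per the hunk context and the "bsd-user: Allocate guest virtual address space" subject) reserves the whole guest address range up front with an inaccessible mapping so later guest allocations stay inside it. Below is a minimal standalone sketch of that technique in plain POSIX C; the 4 GiB size is an invented example and FreeBSD's MAP_EXCL is omitted, so this is an illustration of the idea, not QEMU's code.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>

int main(void)
{
    /* Hypothetical 4 GiB guest address space to reserve. */
    size_t reserve_size = (size_t)4 << 30;

    /*
     * Reserve the range without backing it: PROT_NONE keeps the pages
     * inaccessible, so no memory is committed, but the addresses are
     * taken and later fixed-address mappings inside the range succeed.
     */
    void *base = mmap(NULL, reserve_size, PROT_NONE,
                      MAP_ANON | MAP_PRIVATE, -1, 0);
    if (base == MAP_FAILED) {
        fprintf(stderr, "cannot reserve %zu bytes: %s\n",
                reserve_size, strerror(errno));
        return 1;
    }

    printf("guest address space reserved at %p\n", base);

    /* Later, individual guest mappings would use MAP_FIXED inside the range. */
    munmap(base, reserve_size);
    return 0;
}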
@@ -260,7 +260,8 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
 
     if (reserved_va) {
         return mmap_find_vma_reserved(start, size,
-            (alignment != 0 ? 1 << alignment : 0));
+            (alignment != 0 ? 1 << alignment :
+             MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
     }
 
     addr = start;
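When no explicit alignment is requested, the replacement falls back to the larger of the host and target page sizes instead of 0, so the address returned by the reserved-VA allocator is valid for both page granularities. A small hedged sketch of just that computation (the page sizes are made-up values):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Illustrative values: 16 KiB host pages, 4 KiB target pages. */
    uint64_t host_page_size = 16 * 1024;
    uint64_t target_page_size = 4 * 1024;
    unsigned alignment_log2 = 0;          /* 0 means "no explicit alignment" */

    uint64_t align = alignment_log2 != 0
                   ? (uint64_t)1 << alignment_log2
                   : MAX(host_page_size, target_page_size);

    printf("search VA range with %" PRIu64 "-byte alignment\n", align);
    return 0;
}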
@@ -117,8 +117,9 @@ static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
 {
     uint32_t oldval, newval, val, addr, cpsr, *host_addr;
 
-    oldval = env->regs[0];
-    newval = env->regs[1];
+    /* Swap if host != guest endianness, for the host cmpxchg below */
+    oldval = tswap32(env->regs[0]);
+    newval = tswap32(env->regs[1]);
     addr = env->regs[2];
 
     mmap_lock();
@@ -174,6 +175,10 @@ static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
         return;
     }
 
+    /* Swap if host != guest endianness, for the host cmpxchg below */
+    oldval = tswap64(oldval);
+    newval = tswap64(newval);
+
 #ifdef CONFIG_ATOMIC64
     val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
     cpsr = (val == oldval) * CPSR_C;
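Both cmpxchg hunks swap the guest-order values into host byte order before the host-side compare-and-swap, which is what the armeb-on-little-endian case was missing. A standalone sketch of the same idea using compiler builtins rather than QEMU's tswap32()/qatomic helpers (the GUEST_BIG_ENDIAN switch and the function name are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Stand-in for QEMU's tswap32(): swap only if guest and host endianness differ. */
#define GUEST_BIG_ENDIAN 1

static uint32_t guest_to_host32(uint32_t v)
{
#if GUEST_BIG_ENDIAN && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    return __builtin_bswap32(v);
#else
    return v;
#endif
}

int main(void)
{
    /* A 32-bit word as the guest wrote it, converted to host layout. */
    uint32_t mem = guest_to_host32(0x11223344u);

    uint32_t oldval = guest_to_host32(0x11223344u);  /* expected, from guest registers */
    uint32_t newval = guest_to_host32(0x55667788u);  /* replacement, from guest registers */

    /*
     * Host-side compare-and-swap: both the expected and the new value must
     * be in the same byte order as the memory word, hence the swaps above.
     */
    bool ok = __atomic_compare_exchange_n(&mem, &oldval, newval,
                                          false, __ATOMIC_SEQ_CST,
                                          __ATOMIC_SEQ_CST);
    printf("cmpxchg %s, mem now 0x%08x (host order)\n",
           ok ? "succeeded" : "failed", mem);
    return 0;
}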
@@ -4175,6 +4175,9 @@ static void pmu_count_insns(DisasContext *ctx)
 
 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
 {
+    if (unlikely(ctx->singlestep_enabled)) {
+        return false;
+    }
     return translator_use_goto_tb(&ctx->base, dest);
 }
 
@@ -190,11 +190,6 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
         return false;
     }
 
-    if (excp != PGM_ADDRESSING) {
-        stq_phys(env_cpu(env)->as,
-                 env->psa + offsetof(LowCore, trans_exc_code), tec);
-    }
-
     /*
      * For data accesses, ILEN will be filled in from the unwind info,
      * within cpu_loop_exit_restore. For code accesses, retaddr == 0,
@@ -211,20 +206,33 @@ static void do_program_interrupt(CPUS390XState *env)
     uint64_t mask, addr;
     LowCore *lowcore;
     int ilen = env->int_pgm_ilen;
+    bool set_trans_exc_code = false;
+    bool advance = false;
 
     assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
            ilen == 2 || ilen == 4 || ilen == 6);
 
     switch (env->int_pgm_code) {
     case PGM_PER:
-        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
-            break;
-        }
-        /* FALL THROUGH */
+        advance = !(env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION);
+        break;
+    case PGM_ASCE_TYPE:
+    case PGM_REG_FIRST_TRANS:
+    case PGM_REG_SEC_TRANS:
+    case PGM_REG_THIRD_TRANS:
+    case PGM_SEGMENT_TRANS:
+    case PGM_PAGE_TRANS:
+        assert(env->int_pgm_code == env->tlb_fill_exc);
+        set_trans_exc_code = true;
+        break;
+    case PGM_PROTECTION:
+        assert(env->int_pgm_code == env->tlb_fill_exc);
+        set_trans_exc_code = true;
+        advance = true;
+        break;
     case PGM_OPERATION:
     case PGM_PRIVILEGED:
     case PGM_EXECUTE:
-    case PGM_PROTECTION:
     case PGM_ADDRESSING:
     case PGM_SPECIFICATION:
     case PGM_DATA:
@@ -243,11 +251,15 @@ static void do_program_interrupt(CPUS390XState *env)
     case PGM_PC_TRANS_SPEC:
     case PGM_ALET_SPEC:
     case PGM_MONITOR:
-        /* advance the PSW if our exception is not nullifying */
-        env->psw.addr += ilen;
+        advance = true;
         break;
     }
 
+    /* advance the PSW if our exception is not nullifying */
+    if (advance) {
+        env->psw.addr += ilen;
+    }
+
     qemu_log_mask(CPU_LOG_INT,
                   "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                   __func__, env->int_pgm_code, ilen, env->psw.mask,
@@ -263,6 +275,10 @@ static void do_program_interrupt(CPUS390XState *env)
         env->per_perc_atmid = 0;
     }
 
+    if (set_trans_exc_code) {
+        lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
+    }
+
     lowcore->pgm_ilen = cpu_to_be16(ilen);
     lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
     lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
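The reworked do_program_interrupt() records its per-exception decisions as two booleans inside the switch (advance the PSW or not, store a translation-exception code or not) and applies them once afterwards, instead of repeating the side effects in each case. A compact standalone sketch of that shape, with invented exception codes and fields:

#include <stdbool.h>
#include <stdio.h>

enum { EXC_PER, EXC_PAGE_TRANS, EXC_PROTECTION, EXC_SPECIFICATION };

struct cpu {
    unsigned pc;
    unsigned long long trans_exc_code;
    bool nullifying_per;
};

static void deliver(struct cpu *cpu, int code, int ilen,
                    unsigned long long tec)
{
    bool advance = false;
    bool set_tec = false;

    /* Decide per exception type; no side effects inside the switch. */
    switch (code) {
    case EXC_PER:
        advance = !cpu->nullifying_per;
        break;
    case EXC_PAGE_TRANS:
        set_tec = true;                 /* nullifying: do not advance */
        break;
    case EXC_PROTECTION:
        set_tec = true;
        advance = true;                 /* suppressing: advance past the insn */
        break;
    default:
        advance = true;
        break;
    }

    /* Apply the decisions exactly once. */
    if (advance) {
        cpu->pc += ilen;
    }
    if (set_tec) {
        cpu->trans_exc_code = tec;
    }
}

int main(void)
{
    struct cpu c = { .pc = 0x1000 };
    deliver(&c, EXC_PROTECTION, 4, 0x2000ULL);
    printf("pc=0x%x tec=0x%llx\n", c.pc, c.trans_exc_code);
    return 0;
}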
@@ -48,12 +48,6 @@
  *
  * It also guarantees that if the lookup returns an element it is the 'correct'
  * one. But not returning an element does _NOT_ mean it's not present.
- *
- * NOTE:
- *
- * Stores to __rb_parent_color are not important for simple lookups so those
- * are left undone as of now. Nor did I check for loops involving parent
- * pointers.
  */
 
 typedef enum RBColor
@@ -68,14 +62,29 @@ typedef struct RBAugmentCallbacks {
     void (*rotate)(RBNode *old, RBNode *new);
 } RBAugmentCallbacks;
 
+static inline uintptr_t rb_pc(const RBNode *n)
+{
+    return qatomic_read(&n->rb_parent_color);
+}
+
+static inline void rb_set_pc(RBNode *n, uintptr_t pc)
+{
+    qatomic_set(&n->rb_parent_color, pc);
+}
+
+static inline RBNode *pc_parent(uintptr_t pc)
+{
+    return (RBNode *)(pc & ~1);
+}
+
 static inline RBNode *rb_parent(const RBNode *n)
 {
-    return (RBNode *)(n->rb_parent_color & ~1);
+    return pc_parent(rb_pc(n));
 }
 
 static inline RBNode *rb_red_parent(const RBNode *n)
 {
-    return (RBNode *)n->rb_parent_color;
+    return (RBNode *)rb_pc(n);
 }
 
 static inline RBColor pc_color(uintptr_t pc)
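The new rb_pc()/rb_set_pc()/pc_parent() helpers funnel every access to rb_parent_color through qatomic_read/qatomic_set, so a lock-free lookup decodes parent and colour from one consistent snapshot of the word. A self-contained sketch of the same packing trick using C11 atomics (type and helper names are illustrative, not QEMU's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative node: the low bit of parent_color holds the colour,
 * the remaining bits hold the parent pointer. */
typedef struct Node {
    _Atomic uintptr_t parent_color;
} Node;

enum { RB_RED = 0, RB_BLACK = 1 };

static uintptr_t node_pc(Node *n)
{
    /* One atomic read; parent and colour are decoded from the same snapshot. */
    return atomic_load_explicit(&n->parent_color, memory_order_relaxed);
}

static void node_set_pc(Node *n, uintptr_t pc)
{
    atomic_store_explicit(&n->parent_color, pc, memory_order_relaxed);
}

static Node *pc_parent(uintptr_t pc)
{
    return (Node *)(pc & ~(uintptr_t)1);    /* strip the colour bit */
}

static int pc_color(uintptr_t pc)
{
    return pc & 1;
}

int main(void)
{
    Node parent = { 0 }, child = { 0 };

    node_set_pc(&child, (uintptr_t)&parent | RB_BLACK);

    uintptr_t pc = node_pc(&child);
    printf("parent=%p colour=%s\n", (void *)pc_parent(pc),
           pc_color(pc) == RB_BLACK ? "black" : "red");
    return 0;
}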
@@ -95,27 +104,27 @@ static inline bool pc_is_black(uintptr_t pc)
 
 static inline RBColor rb_color(const RBNode *n)
 {
-    return pc_color(n->rb_parent_color);
+    return pc_color(rb_pc(n));
 }
 
 static inline bool rb_is_red(const RBNode *n)
 {
-    return pc_is_red(n->rb_parent_color);
+    return pc_is_red(rb_pc(n));
 }
 
 static inline bool rb_is_black(const RBNode *n)
 {
-    return pc_is_black(n->rb_parent_color);
+    return pc_is_black(rb_pc(n));
 }
 
 static inline void rb_set_black(RBNode *n)
 {
-    n->rb_parent_color |= RB_BLACK;
+    rb_set_pc(n, rb_pc(n) | RB_BLACK);
 }
 
 static inline void rb_set_parent_color(RBNode *n, RBNode *p, RBColor color)
 {
-    n->rb_parent_color = (uintptr_t)p | color;
+    rb_set_pc(n, (uintptr_t)p | color);
 }
 
 static inline void rb_set_parent(RBNode *n, RBNode *p)
@@ -128,7 +137,11 @@ static inline void rb_link_node(RBNode *node, RBNode *parent, RBNode **rb_link)
     node->rb_parent_color = (uintptr_t)parent;
     node->rb_left = node->rb_right = NULL;
 
-    qatomic_set(rb_link, node);
+    /*
+     * Ensure that node is initialized before insertion,
+     * as viewed by a concurrent search.
+     */
+    qatomic_set_mb(rb_link, node);
 }
 
 static RBNode *rb_next(RBNode *node)
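qatomic_set_mb() publishes the new node only after it is fully initialized, with ordering strong enough that a concurrent search following rb_link cannot observe a half-built node. A minimal sketch of the publish side of that pattern with a C11 release store (all names invented):

#include <stdatomic.h>
#include <stddef.h>

typedef struct Item {
    int key;
    int value;
} Item;

/* Shared slot that a concurrent reader polls with an acquire load. */
static _Atomic(Item *) slot;

static void publish(Item *it, int key, int value)
{
    /* 1. Initialize the object completely... */
    it->key = key;
    it->value = value;

    /*
     * 2. ...then publish it. The release store orders the plain stores
     * above before the pointer becomes visible, so a reader that sees
     * the pointer also sees an initialized Item. QEMU's qatomic_set_mb()
     * is stronger still: a store followed by a full barrier.
     */
    atomic_store_explicit(&slot, it, memory_order_release);
}

static Item *try_consume(void)
{
    /* Acquire load pairs with the release store in publish(). */
    return atomic_load_explicit(&slot, memory_order_acquire);
}

int main(void)
{
    static Item it;
    publish(&it, 1, 42);
    Item *p = try_consume();
    return p ? p->value != 42 : 1;
}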
@@ -177,9 +190,10 @@ static inline void rb_change_child(RBNode *old, RBNode *new,
 static inline void rb_rotate_set_parents(RBNode *old, RBNode *new,
                                          RBRoot *root, RBColor color)
 {
-    RBNode *parent = rb_parent(old);
+    uintptr_t pc = rb_pc(old);
+    RBNode *parent = pc_parent(pc);
 
-    new->rb_parent_color = old->rb_parent_color;
+    rb_set_pc(new, pc);
     rb_set_parent_color(old, new, color);
     rb_change_child(old, new, parent, root);
 }
@@ -527,11 +541,11 @@ static void rb_erase_augmented(RBNode *node, RBRoot *root,
          * and node must be black due to 4). We adjust colors locally
          * so as to bypass rb_erase_color() later on.
          */
-        pc = node->rb_parent_color;
-        parent = rb_parent(node);
+        pc = rb_pc(node);
+        parent = pc_parent(pc);
         rb_change_child(node, child, parent, root);
         if (child) {
-            child->rb_parent_color = pc;
+            rb_set_pc(child, pc);
             rebalance = NULL;
         } else {
             rebalance = pc_is_black(pc) ? parent : NULL;
@@ -539,9 +553,9 @@ static void rb_erase_augmented(RBNode *node, RBRoot *root,
         tmp = parent;
     } else if (!child) {
         /* Still case 1, but this time the child is node->rb_left */
-        pc = node->rb_parent_color;
-        parent = rb_parent(node);
-        tmp->rb_parent_color = pc;
+        pc = rb_pc(node);
+        parent = pc_parent(pc);
+        rb_set_pc(tmp, pc);
         rb_change_child(node, tmp, parent, root);
         rebalance = NULL;
         tmp = parent;
@@ -595,8 +609,8 @@ static void rb_erase_augmented(RBNode *node, RBRoot *root,
         qatomic_set(&successor->rb_left, tmp);
         rb_set_parent(tmp, successor);
 
-        pc = node->rb_parent_color;
-        tmp = rb_parent(node);
+        pc = rb_pc(node);
+        tmp = pc_parent(pc);
         rb_change_child(node, successor, tmp, root);
 
         if (child2) {
@@ -605,7 +619,7 @@ static void rb_erase_augmented(RBNode *node, RBRoot *root,
         } else {
             rebalance = rb_is_black(successor) ? parent : NULL;
         }
-        successor->rb_parent_color = pc;
+        rb_set_pc(successor, pc);
         tmp = successor;
     }
 
@@ -745,8 +759,9 @@ static IntervalTreeNode *interval_tree_subtree_search(IntervalTreeNode *node,
          * Loop invariant: start <= node->subtree_last
          * (Cond2 is satisfied by one of the subtree nodes)
          */
-        if (node->rb.rb_left) {
-            IntervalTreeNode *left = rb_to_itree(node->rb.rb_left);
+        RBNode *tmp = qatomic_read(&node->rb.rb_left);
+        if (tmp) {
+            IntervalTreeNode *left = rb_to_itree(tmp);
 
             if (start <= left->subtree_last) {
                 /*
@@ -765,8 +780,9 @@ static IntervalTreeNode *interval_tree_subtree_search(IntervalTreeNode *node,
         if (start <= node->last) { /* Cond2 */
             return node; /* node is leftmost match */
         }
-        if (node->rb.rb_right) {
-            node = rb_to_itree(node->rb.rb_right);
+        tmp = qatomic_read(&node->rb.rb_right);
+        if (tmp) {
+            node = rb_to_itree(tmp);
             if (start <= node->subtree_last) {
                 continue;
             }
@@ -814,8 +830,9 @@ IntervalTreeNode *interval_tree_iter_first(IntervalTreeRoot *root,
 IntervalTreeNode *interval_tree_iter_next(IntervalTreeNode *node,
                                           uint64_t start, uint64_t last)
 {
-    RBNode *rb = node->rb.rb_right, *prev;
+    RBNode *rb, *prev;
 
+    rb = qatomic_read(&node->rb.rb_right);
     while (true) {
         /*
          * Loop invariants:
@@ -840,7 +857,7 @@ IntervalTreeNode *interval_tree_iter_next(IntervalTreeNode *node,
             }
             prev = &node->rb;
             node = rb_to_itree(rb);
-            rb = node->rb.rb_right;
+            rb = qatomic_read(&node->rb.rb_right);
         } while (prev == rb);
 
         /* Check if the node intersects [start;last] */
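The search and iteration paths now read rb_left/rb_right with qatomic_read, so each child pointer is loaded exactly once and never seen torn while a writer relinks the tree; as the file's comment warns, a miss only means "not found by this traversal". A standalone sketch of such a reader with C11 atomics (structure and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

typedef struct TNode {
    int key;
    _Atomic(struct TNode *) left, right;
} TNode;

/*
 * Lock-free lookup: every child pointer is read through a single atomic
 * load, so a concurrent writer relinking the tree cannot expose a torn
 * pointer, and the reader never re-reads a field it already tested.
 */
static TNode *find(_Atomic(TNode *) *rootp, int key)
{
    TNode *n = atomic_load_explicit(rootp, memory_order_acquire);

    while (n && n->key != key) {
        n = atomic_load_explicit(key < n->key ? &n->left : &n->right,
                                 memory_order_acquire);
    }
    return n;   /* NULL means "not found now"; the key may still be present */
}

int main(void)
{
    TNode l = { .key = 1 }, r = { .key = 9 }, root = { .key = 5 };
    _Atomic(TNode *) tree = &root;

    atomic_store_explicit(&root.left, &l, memory_order_release);
    atomic_store_explicit(&root.right, &r, memory_order_release);

    printf("found key %d\n", find(&tree, 9)->key);
    return 0;
}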