mirror of https://github.com/xemu-project/xemu.git
Revert "linux-user/arm: Take more care allocating commpage"
accel/tcg: Fix jump cache set in cpu_exec_loop accel/tcg: Fix initialization of CF_PCREL in tcg_cflags tcg/sparc64: Disable direct jumps from goto_tb -----BEGIN PGP SIGNATURE----- iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmQsRwAdHHJpY2hhcmQu aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8QqQf6AnUwdkp0zEUevshL gPlns3u5RZIv/i5L1VIkYrLbaFQtwD17CfBACB2MxZI6lbPK4jT++FlDiWJ1ak7i 4p9Q5KOAbjAxQgQXy51NbEzg5AoIYP5HEg2cnjfEg0PltRVTn0bdbBVbCG/erDXa NfEOPtHSU+6B8jRjp0MRvFv4Y4CJ3nQ7eZ6TMI3oGOk44DOs22Fn330E8wT2vpt4 ayJNDN8F0FZ5wGZdJIAsMSgauaGJwY/ZG1KX8TGQb7nsRn5lbpEgoOUCd0WUGdx6 3YsoELaZoHZhAlvVNjbOMBp3fZSH2owb08By9vU7ZMjPOnsjZQz6TuxR6NNl+04G tPi44Q== =+m7M -----END PGP SIGNATURE----- Merge tag 'pull-tcg-20230404' of https://gitlab.com/rth7680/qemu into staging Revert "linux-user/arm: Take more care allocating commpage" accel/tcg: Fix jump cache set in cpu_exec_loop accel/tcg: Fix initialization of CF_PCREL in tcg_cflags tcg/sparc64: Disable direct jumps from goto_tb # -----BEGIN PGP SIGNATURE----- # # iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmQsRwAdHHJpY2hhcmQu # aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8QqQf6AnUwdkp0zEUevshL # gPlns3u5RZIv/i5L1VIkYrLbaFQtwD17CfBACB2MxZI6lbPK4jT++FlDiWJ1ak7i # 4p9Q5KOAbjAxQgQXy51NbEzg5AoIYP5HEg2cnjfEg0PltRVTn0bdbBVbCG/erDXa # NfEOPtHSU+6B8jRjp0MRvFv4Y4CJ3nQ7eZ6TMI3oGOk44DOs22Fn330E8wT2vpt4 # ayJNDN8F0FZ5wGZdJIAsMSgauaGJwY/ZG1KX8TGQb7nsRn5lbpEgoOUCd0WUGdx6 # 3YsoELaZoHZhAlvVNjbOMBp3fZSH2owb08By9vU7ZMjPOnsjZQz6TuxR6NNl+04G # tPi44Q== # =+m7M # -----END PGP SIGNATURE----- # gpg: Signature made Tue 04 Apr 2023 16:49:20 BST # gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F # gpg: issuer "richard.henderson@linaro.org" # gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full] # Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F * tag 'pull-tcg-20230404' of https://gitlab.com/rth7680/qemu: tcg/sparc64: Disable direct linking for goto_tb accel/tcg: Fix jump cache set in cpu_exec_loop accel/tcg: Fix overwrite problems of tcg_cflags Revert "linux-user/arm: Take more care allocating commpage" Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 7d0334e491
accel/tcg/cpu-exec.c

@@ -257,7 +257,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
 
     if (cflags & CF_PCREL) {
         /* Use acquire to ensure current load of pc from jc. */
-        tb = qatomic_load_acquire(&jc->array[hash].tb);
+        tb = qatomic_load_acquire(&jc->array[hash].tb);
 
         if (likely(tb &&
                    jc->array[hash].pc == pc &&
@@ -272,7 +272,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
             return NULL;
         }
         jc->array[hash].pc = pc;
-        /* Use store_release on tb to ensure pc is written first. */
+        /* Ensure pc is written first. */
         qatomic_store_release(&jc->array[hash].tb, tb);
     } else {
         /* Use rcu_read to ensure current load of pc from *tb. */
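The two hunks above are the reader half of an acquire/release pairing: pc is a plain field that must be published before tb, so a lookup that load-acquires tb is guaranteed to observe the pc that was stored with it. Below is a minimal, self-contained C11 sketch of that reader side; the struct and function names are illustrative stand-ins, not QEMU's API (qatomic_load_acquire plays the role of atomic_load_explicit with memory_order_acquire).

#include <stdatomic.h>
#include <stdint.h>

struct tb;                            /* opaque translation block */

struct jc_entry {
    uintptr_t pc;                     /* plain field, published before tb */
    _Atomic(struct tb *) tb;          /* the synchronization point */
};

/* Return the cached tb only if it was published together with this pc. */
static struct tb *jc_lookup(struct jc_entry *e, uintptr_t pc)
{
    /*
     * Acquire: all stores made before the matching store-release to
     * e->tb, in particular the store to e->pc, are visible below.
     */
    struct tb *tb = atomic_load_explicit(&e->tb, memory_order_acquire);

    if (tb != NULL && e->pc == pc) {
        return tb;                    /* hit: pc and tb are consistent */
    }
    return NULL;                      /* miss: take the slow path */
}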
@@ -971,18 +971,27 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
 
         tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
         if (tb == NULL) {
+            CPUJumpCache *jc;
             uint32_t h;
 
             mmap_lock();
             tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
             mmap_unlock();
+
             /*
              * We add the TB in the virtual pc hash table
              * for the fast lookup
              */
             h = tb_jmp_cache_hash_func(pc);
-            /* Use the pc value already stored in tb->pc. */
-            qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
+            jc = cpu->tb_jmp_cache;
+            if (cflags & CF_PCREL) {
+                jc->array[h].pc = pc;
+                /* Ensure pc is written first. */
+                qatomic_store_release(&jc->array[h].tb, tb);
+            } else {
+                /* Use the pc value already stored in tb->pc. */
+                qatomic_set(&jc->array[h].tb, tb);
+            }
         }
 
 #ifndef CONFIG_USER_ONLY
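This hunk fixes the writer half of the pairing: the old code published tb with a plain set and never wrote pc, so a later CF_PCREL lookup could match the new tb against a stale pc. The matching writer side of the sketch above, with the same illustrative types:

/* Publish a new (pc, tb) pair into a cache entry (types as above). */
static void jc_publish(struct jc_entry *e, uintptr_t pc, struct tb *tb)
{
    e->pc = pc;                       /* write pc first ... */
    /* Release: the pc store above cannot be reordered past this. */
    atomic_store_explicit(&e->tb, tb, memory_order_release);
}

With the release store, a reader that sees the new tb through its acquire load is also guaranteed to see the matching pc; publishing tb alone, or before pc, reintroduces exactly the stale-hit this commit closes.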
accel/tcg/tcg-accel-ops.c

@@ -59,7 +59,7 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
 
     cflags |= parallel ? CF_PARALLEL : 0;
     cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
-    cpu->tcg_cflags = cflags;
+    cpu->tcg_cflags |= cflags;
 }
 
 void tcg_cpus_destroy(CPUState *cpu)
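The whole fix is the single |=: CF_PCREL is ORed into cpu->tcg_cflags earlier, during accelerator setup, and the plain assignment here silently discarded it. A toy demonstration of overwrite versus merge; the flag names and values are invented for the example, QEMU's real CF_* constants differ:

#include <assert.h>
#include <stdint.h>

enum {
    CF_PCREL_DEMO    = 1u << 0,   /* stands in for CF_PCREL */
    CF_PARALLEL_DEMO = 1u << 1,   /* stands in for CF_PARALLEL */
};

int main(void)
{
    uint32_t tcg_cflags = CF_PCREL_DEMO;   /* set during accel init */
    uint32_t cflags = CF_PARALLEL_DEMO;    /* computed per-cpu later */

    /* tcg_cflags = cflags;   -- the old code: drops CF_PCREL_DEMO */
    tcg_cflags |= cflags;     /* the fix: merge instead of overwrite */

    assert(tcg_cflags == (CF_PCREL_DEMO | CF_PARALLEL_DEMO));
    return 0;
}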
linux-user/elfload.c

@@ -423,32 +423,12 @@ enum {
 
 static bool init_guest_commpage(void)
 {
-    ARMCPU *cpu = ARM_CPU(thread_cpu);
-    abi_ptr want = HI_COMMPAGE & TARGET_PAGE_MASK;
-    abi_ptr addr;
-
-    /*
-     * M-profile allocates maximum of 2GB address space, so can never
-     * allocate the commpage.  Skip it.
-     */
-    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
-        return true;
-    }
-
-    /*
-     * If reserved_va does not cover the commpage, we get an assert
-     * in page_set_flags.  Produce an intelligent error instead.
-     */
-    if (reserved_va != 0 && want + TARGET_PAGE_SIZE - 1 > reserved_va) {
-        error_report("Allocating guest commpage: -R 0x%" PRIx64 " too small",
-                     (uint64_t)reserved_va + 1);
-        exit(EXIT_FAILURE);
-    }
-
-    addr = target_mmap(want, TARGET_PAGE_SIZE, PROT_READ | PROT_WRITE,
-                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
-
-    if (addr == -1) {
+    abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
+    void *want = g2h_untagged(commpage);
+    void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
+                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+
+    if (addr == MAP_FAILED) {
         perror("Allocating guest commpage");
         exit(EXIT_FAILURE);
     }
@@ -457,12 +437,15 @@ static bool init_guest_commpage(void)
     }
 
     /* Set kernel helper versions; rest of page is 0.  */
-    put_user_u32(5, 0xffff0ffcu);
+    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
 
-    if (target_mprotect(addr, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC)) {
+    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
         perror("Protecting guest commpage");
         exit(EXIT_FAILURE);
     }
+
+    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
+                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
 
     return true;
 }
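The restored lines lean on power-of-two page arithmetic: addr & -page_size rounds down to a page boundary (as in HI_COMMPAGE & -qemu_host_page_size), and base | ~page_mask yields the last byte of that page (the bound passed to page_set_flags). A quick standalone check of both identities; the page size and address are demo values chosen only for this sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uintptr_t page_size   = 4096;            /* power of two */
    const uintptr_t page_mask   = ~(page_size - 1);
    const uintptr_t hi_commpage = 0xffff0f00u;     /* demo address */

    uintptr_t base = hi_commpage & -page_size;     /* round down */
    uintptr_t last = base | ~page_mask;            /* last byte of page */

    assert(base == 0xffff0000u);
    assert(last == 0xffff0fffu);
    assert(base % page_size == 0);                 /* page aligned */
    return 0;
}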
tcg/sparc64/tcg-target.c.inc

@@ -1445,12 +1445,12 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
 {
     ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
 
-    /* Direct branch will be patched by tb_target_set_jmp_target. */
-    set_jmp_insn_offset(s, which);
-    tcg_out32(s, CALL);
-    /* delay slot */
+    /* Load link and indirect branch. */
     tcg_debug_assert(check_fit_ptr(off, 13));
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
+    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
+    /* delay slot */
+    tcg_out_nop(s);
     set_jmp_reset_offset(s, which);
 
     /*
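With direct linking disabled, every goto_tb emits the same sequence: load the link target from a slot addressed relative to TCG_REG_TB, branch through the register (JMPL), and fill the delay slot with a nop. Linking a TB then means updating the slot's pointer rather than patching the branch instruction in the code buffer. A rough C model of that indirection; the names are invented, and plain function pointers stand in for generated code:

#include <stdatomic.h>
#include <stdio.h>

typedef void (*tb_fn)(void);

static void epilogue(void) { puts("unlinked: exit to the main loop"); }
static void next_tb(void)  { puts("linked: run the next tb"); }

/* One slot per goto_tb site; (un)linking rewrites the slot, not the code. */
static _Atomic tb_fn jmp_slot = epilogue;

/* "Load link and indirect branch": read the slot and jump through it. */
static void goto_tb_model(void)
{
    tb_fn target = atomic_load_explicit(&jmp_slot, memory_order_relaxed);
    target();
}

/* Relinking reduces to one pointer-sized store into the slot. */
static void link_tb(tb_fn target)
{
    atomic_store_explicit(&jmp_slot, target, memory_order_relaxed);
}

int main(void)
{
    goto_tb_model();     /* takes the epilogue */
    link_tb(next_tb);
    goto_tb_model();     /* now branches straight to next_tb */
    return 0;
}

In QEMU itself the slot update happens in the generic TB-linking code, which is why the backend patching hook in the next hunk can be emptied out.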
@@ -1469,28 +1469,6 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
-    uintptr_t addr = tb->jmp_target_addr[n];
-    intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2;
-    tcg_insn_unit insn;
-
-    br_disp >>= 2;
-    if (check_fit_ptr(br_disp, 19)) {
-        /* ba,pt %icc, addr */
-        insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
-                         | BPCC_ICC | BPCC_PT, 0, 19, br_disp);
-    } else if (check_fit_ptr(br_disp, 22)) {
-        /* ba addr */
-        insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A),
-                         0, 22, br_disp);
-    } else {
-        /* The code_gen_buffer can't be larger than 2GB. */
-        tcg_debug_assert(check_fit_ptr(br_disp, 30));
-        /* call addr */
-        insn = deposit32(CALL, 0, 30, br_disp);
-    }
-
-    qatomic_set((uint32_t *)jmp_rw, insn);
-    flush_idcache_range(jmp_rx, jmp_rw, 4);
 }
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
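The deleted body picked the shortest SPARC encoding whose immediate field could hold the word displacement: 19 bits for ba,pt %icc, 22 for ba, 30 for call, then spliced the displacement into the instruction with deposit32 and flushed the patched word. With no direct branch left to patch, the hook becomes empty. A simplified stand-in for the bitfield-insertion and range-check steps; QEMU's own deposit32 and check_fit_ptr behave along these lines, but these are sketches, not the real helpers:

#include <assert.h>
#include <stdint.h>

/* Insert the low 'len' bits of 'field' into 'insn' starting at bit 'pos'. */
static uint32_t deposit32_sketch(uint32_t insn, int pos, int len,
                                 uint32_t field)
{
    uint32_t mask = ((len < 32 ? (1u << len) : 0u) - 1u) << pos;
    return (insn & ~mask) | ((field << pos) & mask);
}

/* Does the signed displacement fit in a 'bits'-wide immediate field? */
static int fits_signed(int64_t v, int bits)
{
    int64_t lim = (int64_t)1 << (bits - 1);
    return v >= -lim && v < lim;
}

int main(void)
{
    int64_t disp = -12345;                      /* word displacement */

    assert(fits_signed(disp, 19));              /* ba,pt would reach it */
    uint32_t insn = deposit32_sketch(0, 0, 19, (uint32_t)disp);

    assert((insn & ~((1u << 19) - 1)) == 0);    /* only field bits set */
    assert((insn & (1u << 18)) != 0);           /* sign bit preserved */
    return 0;
}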