mirror of https://github.com/xqemu/xqemu.git
Queued tcg patches
-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJZ/CrmAAoJEGTfOOivfiFfUP4IALDxb3kXUGYELNfQR8+P1Bnk
x4p5dbDCfyIvMFGIngbjaCYlv/sMIcY780yQLZN0W+hUfr7//2kRemzNPoGWpY1p
w/tsKzKn/EIpXoKruD12q5qUL6ceIabwuUdDQ5Ezc4tTAz/Nd1kiMA1IA+7i3Gfb
E9F/0LbG+wXSTD87A7T754dsSLSszL0RaFwx6nJYxk876CjKv9mjJAPlmA23PKTa
W8Qsk4+Ou5mP0/jyxBEQ4KAgY+LE2maP///diPuZKy7iQbhWwogga7VX1ek/o+4T
74J1ye/5SSeISGJpIW18PmR0Vcf44eqz1aMavvFRwIw8Wr6bHQ9mcQlpakWjuWo=
=ncG6
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20171103' into staging

Queued tcg patches

# gpg: Signature made Fri 03 Nov 2017 08:37:58 GMT
# gpg:                using RSA key 0x64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20171103:
  cpu-exec: Exit exclusive region on longjmp from step_atomic
  tcg/s390x: Use constant pool for prologue
  tcg: Allow constant pool entries in the prologue

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 9c4da1fa2b
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -233,6 +233,8 @@ void cpu_exec_step_atomic(CPUState *cpu)
     uint32_t flags;
     uint32_t cflags = 1;
     uint32_t cf_mask = cflags & CF_HASH_MASK;
+    /* volatile because we modify it between setjmp and longjmp */
+    volatile bool in_exclusive_region = false;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
         tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
@@ -251,14 +253,12 @@ void cpu_exec_step_atomic(CPUState *cpu)
 
         /* Since we got here, we know that parallel_cpus must be true.  */
         parallel_cpus = false;
+        in_exclusive_region = true;
         cc->cpu_exec_enter(cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
         cpu_tb_exec(cpu, tb);
         cc->cpu_exec_exit(cpu);
-        parallel_cpus = true;
-
-        end_exclusive();
     } else {
         /* We may have exited due to another problem here, so we need
          * to reset any tb_locks we may have taken but didn't release.
@@ -270,6 +270,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
 #endif
         tb_lock_reset();
     }
+
+    if (in_exclusive_region) {
+        /* We might longjump out of either the codegen or the
+         * execution, so must make sure we only end the exclusive
+         * region if we started it.
+         */
+        parallel_cpus = true;
+        end_exclusive();
+    }
 }
 
 struct tb_desc {
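A note on the cpu-exec change above: it leans on a classic setjmp rule. A local variable modified between sigsetjmp() and the corresponding siglongjmp() has an indeterminate value after the jump unless it is declared volatile, which is why the guard flag is volatile. Below is a minimal standalone sketch of the same pattern, not QEMU code; fail() and the printf are hypothetical stand-ins for cpu_loop_exit() and end_exclusive().

    #include <setjmp.h>
    #include <stdbool.h>
    #include <stdio.h>

    static sigjmp_buf jmp_env;

    static void fail(void)
    {
        siglongjmp(jmp_env, 1);         /* models a longjmp out of execution */
    }

    int main(void)
    {
        /* volatile: modified between sigsetjmp and siglongjmp */
        volatile bool in_exclusive_region = false;

        if (sigsetjmp(jmp_env, 0) == 0) {
            in_exclusive_region = true; /* region entered... */
            fail();                     /* ...then we longjmp out of it */
        }

        /* Reached on both the normal and the longjmp path; end the
           region only if this invocation actually started it. */
        if (in_exclusive_region) {
            printf("end_exclusive()\n");
        }
        return 0;
    }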
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -555,9 +555,6 @@ static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
 static const S390Opcode lli_insns[4] = {
     RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
 };
-static const S390Opcode ii_insns[4] = {
-    RI_IILL, RI_IILH, RI_IIHL, RI_IIHH
-};
 
 static bool maybe_out_small_movi(TCGContext *s, TCGType type,
                                  TCGReg ret, tcg_target_long sval)
@@ -647,36 +644,19 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
         return;
     }
 
-    /* When allowed, stuff it in the constant pool.  */
-    if (!in_prologue) {
-        if (USE_REG_TB) {
-            tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
-            new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
-                           -(intptr_t)s->code_gen_ptr);
-        } else {
-            tcg_out_insn(s, RIL, LGRL, ret, 0);
-            new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
-        }
-        return;
-    }
-
-    /* What's left is for the prologue, loading GUEST_BASE, and because
-       it failed to match above, is known to be a full 64-bit quantity.
-       We could try more than this, but it probably wouldn't pay off.  */
-    if (s390_facilities & FACILITY_EXT_IMM) {
-        tcg_out_insn(s, RIL, LLILF, ret, uval);
-        tcg_out_insn(s, RIL, IIHF, ret, uval >> 32);
+    /* Otherwise, stuff it in the constant pool.  */
+    if (s390_facilities & FACILITY_GEN_INST_EXT) {
+        tcg_out_insn(s, RIL, LGRL, ret, 0);
+        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
+    } else if (USE_REG_TB && !in_prologue) {
+        tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
+        new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
+                       -(intptr_t)s->code_gen_ptr);
     } else {
-        const S390Opcode *insns = lli_insns;
-        int i;
-
-        for (i = 0; i < 4; i++) {
-            uint16_t part = uval >> (16 * i);
-            if (part) {
-                tcg_out_insn_RI(s, insns[i], ret, part);
-                insns = ii_insns;
-            }
-        }
+        TCGReg base = ret ? ret : TCG_TMP0;
+        tcg_out_insn(s, RIL, LARL, base, 0);
+        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
+        tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0);
     }
 }
 
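For context on the s390 hunks: instead of synthesizing an arbitrary 64-bit immediate from up to four 16-bit insert instructions, the backend now emits a PC-relative load (LGRL, or LARL plus LG) and records a pool label so the constant can be placed after the code and the displacement patched in later. The sketch below illustrates that bookkeeping generically; pool_entry, new_pool_entry and pool_finalize are made-up names, not the TCG API.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_POOL 16

    struct pool_entry {
        uint64_t value;   /* constant destined for the pool */
        size_t fixup;     /* code offset of the displacement to patch */
    };

    static struct pool_entry pool[MAX_POOL];
    static size_t pool_count;

    /* During code emission: remember the constant and the fixup site. */
    static void new_pool_entry(uint64_t value, size_t fixup)
    {
        pool[pool_count].value = value;
        pool[pool_count].fixup = fixup;
        pool_count++;
    }

    /* After emission (the tcg_out_pool_finalize() step, for the
       prologue): lay the constants out past the code and patch the
       PC-relative displacements. */
    static void pool_finalize(uint8_t *code, size_t code_size)
    {
        size_t pool_base = (code_size + 7) & ~(size_t)7; /* 8-byte align */
        for (size_t i = 0; i < pool_count; i++) {
            memcpy(code + pool_base + 8 * i, &pool[i].value, 8);
            /* A real backend rewrites the instruction's displacement
               field; here we just report what would be patched. */
            printf("patch code+%zu -> pool+%zu\n", pool[i].fixup, 8 * i);
        }
    }

    int main(void)
    {
        static uint8_t buf[256];                  /* pretend code buffer */
        new_pool_entry(0xdeadbeefcafef00dULL, 2); /* e.g. an LGRL's disp */
        pool_finalize(buf, 32);                   /* 32 bytes of "code" */
        return 0;
    }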
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -771,12 +771,32 @@ void tcg_prologue_init(TCGContext *s)
 
     /* Put the prologue at the beginning of code_gen_buffer.  */
     buf0 = s->code_gen_buffer;
+    total_size = s->code_gen_buffer_size;
     s->code_ptr = buf0;
     s->code_buf = buf0;
+    s->data_gen_ptr = NULL;
     s->code_gen_prologue = buf0;
 
+    /* Compute a high-water mark, at which we voluntarily flush the buffer
+       and start over.  The size here is arbitrary, significantly larger
+       than we expect the code generation for any one opcode to require.  */
+    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);
+
+#ifdef TCG_TARGET_NEED_POOL_LABELS
+    s->pool_labels = NULL;
+#endif
+
     /* Generate the prologue.  */
     tcg_target_qemu_prologue(s);
+
+#ifdef TCG_TARGET_NEED_POOL_LABELS
+    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
+    {
+        bool ok = tcg_out_pool_finalize(s);
+        tcg_debug_assert(ok);
+    }
+#endif
+
     buf1 = s->code_ptr;
     flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
 
@@ -785,21 +805,36 @@ void tcg_prologue_init(TCGContext *s)
     s->code_gen_ptr = buf1;
     s->code_gen_buffer = buf1;
     s->code_buf = buf1;
-    total_size = s->code_gen_buffer_size - prologue_size;
+    total_size -= prologue_size;
     s->code_gen_buffer_size = total_size;
 
-    /* Compute a high-water mark, at which we voluntarily flush the buffer
-       and start over.  The size here is arbitrary, significantly larger
-       than we expect the code generation for any one opcode to require.  */
-    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);
-
     tcg_register_jit(s->code_gen_buffer, total_size);
 
 #ifdef DEBUG_DISAS
     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
         qemu_log_lock();
         qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
-        log_disas(buf0, prologue_size);
+        if (s->data_gen_ptr) {
+            size_t code_size = s->data_gen_ptr - buf0;
+            size_t data_size = prologue_size - code_size;
+            size_t i;
+
+            log_disas(buf0, code_size);
+
+            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
+                if (sizeof(tcg_target_ulong) == 8) {
+                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
+                             (uintptr_t)s->data_gen_ptr + i,
+                             *(uint64_t *)(s->data_gen_ptr + i));
+                } else {
+                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
+                             (uintptr_t)s->data_gen_ptr + i,
+                             *(uint32_t *)(s->data_gen_ptr + i));
+                }
+            }
+        } else {
+            log_disas(buf0, prologue_size);
+        }
         qemu_log("\n");
         qemu_log_flush();
         qemu_log_unlock();
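The tcg.c hunks move the high-water computation ahead of prologue generation and recompute the sizes afterwards. The idea behind code_gen_highwater, sketched standalone below with made-up sizes (not QEMU's actual control flow): translation keeps appending until the write pointer crosses the mark, which leaves slack for one opcode's worst-case output, and then the whole buffer is flushed and translation starts over.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BUFFER_SIZE 4096
    #define HIGHWATER   256   /* slack for one opcode's worst case */

    static uint8_t buffer[BUFFER_SIZE];
    static uint8_t *code_ptr = buffer;
    static uint8_t * const highwater = buffer + BUFFER_SIZE - HIGHWATER;

    static void emit(size_t n)
    {
        if (code_ptr + n > highwater) {
            /* models tb_flush(): drop all generated code, start over */
            printf("high-water mark hit, flushing buffer\n");
            code_ptr = buffer;
        }
        code_ptr += n;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++) {
            emit(64);         /* pretend each TB emits 64 bytes */
        }
        return 0;
    }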