mirror of https://github.com/xemu-project/xemu.git
target/i386: Make translator stop before the end of a page
Right now translator stops right *after* the end of a page, which breaks
reporting of fault locations when the last instruction of a multi-insn
translation block crosses a page boundary.

An implementation, like the one arm and s390x have, would require an i386
length disassembler, which is burdensome to maintain. Another alternative
would be to single-step at the end of a guest page, but this may come with
a performance impact.

Fix by snapshotting disassembly state and restoring it after we figure out
we crossed a page boundary. This includes rolling back cc_op updates and
emitted ops.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1143
Message-Id: <20220817150506.592862-4-iii@linux.ibm.com>
[rth: Simplify end-of-insn cross-page checks.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
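The snapshot-and-rollback idea can be illustrated outside of QEMU. The toy
program below is a minimal sketch in plain C (struct decoder, decode_one and
advance are made-up names, not QEMU APIs): a sigsetjmp() snapshot is taken
before each instruction, and the decoder siglongjmp()s out with value 2, as
in the patch, when the instruction would cross a page boundary, so the caller
can restore the saved state and end the block instead.

    #include <setjmp.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    struct decoder {
        sigjmp_buf jmpbuf;
        unsigned long pc;       /* stand-ins for the real DisasContext fields */
        int insns_in_block;
    };

    /* Called when consuming instruction bytes; bails out on a page crossing. */
    static void advance(struct decoder *d, int num_bytes)
    {
        unsigned long first = d->pc;
        unsigned long last = d->pc + num_bytes - 1;

        if (d->insns_in_block > 1 && (first / PAGE_SIZE) != (last / PAGE_SIZE)) {
            siglongjmp(d->jmpbuf, 2);   /* unwind back into decode_one() */
        }
        d->pc += num_bytes;
    }

    /* Returns 1 if the instruction was decoded, 0 if the block must stop. */
    static int decode_one(struct decoder *d, int insn_len)
    {
        unsigned long saved_pc = d->pc;     /* snapshot before decoding */

        switch (sigsetjmp(d->jmpbuf, 0)) {
        case 0:
            d->insns_in_block++;
            advance(d, insn_len);           /* may siglongjmp() */
            return 1;
        case 2:
            d->pc = saved_pc;               /* roll back and stop the block */
            d->insns_in_block--;
            return 0;
        default:
            return 0;
        }
    }

    int main(void)
    {
        struct decoder d = { .pc = PAGE_SIZE - 3 };

        printf("first insn decoded: %d\n", decode_one(&d, 2));  /* fits on the page */
        printf("second insn decoded: %d\n", decode_one(&d, 4)); /* would cross it */
        return 0;
    }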
parent ab12c95d3f
commit 950936681f
target/i386/translate.c

@@ -130,6 +130,7 @@ typedef struct DisasContext {
     TCGv_i64 tmp1_i64;
 
     sigjmp_buf jmpbuf;
+    TCGOp *prev_insn_end;
 } DisasContext;
 
 /* The environment in which user-only runs is constrained. */
@@ -2008,6 +2009,12 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
 {
     uint64_t pc = s->pc;
 
+    /* This is a subsequent insn that crosses a page boundary. */
+    if (s->base.num_insns > 1 &&
+        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
+        siglongjmp(s->jmpbuf, 2);
+    }
+
     s->pc += num_bytes;
     if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
         /* If the instruction's 16th byte is on a different page than the 1st, a
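The is_same_page() check used here comes from include/exec/translator.h, added
earlier in this series; from memory its definition is roughly the following
(treat as approximate):

    static inline bool is_same_page(const DisasContextBase *db, target_ulong addr)
    {
        return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
    }

i.e. a byte is "on the same page" iff it shares a page number with pc_first,
the address where translation of the block started.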
@@ -4669,6 +4676,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     int modrm, reg, rm, mod, op, opreg, val;
     target_ulong next_eip, tval;
     target_ulong pc_start = s->base.pc_next;
+    bool orig_cc_op_dirty = s->cc_op_dirty;
+    CCOp orig_cc_op = s->cc_op;
 
     s->pc_start = s->pc = pc_start;
     s->override = -1;
@@ -4681,9 +4690,22 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     s->rip_offset = 0; /* for relative ip address */
     s->vex_l = 0;
     s->vex_v = 0;
-    if (sigsetjmp(s->jmpbuf, 0) != 0) {
+    switch (sigsetjmp(s->jmpbuf, 0)) {
+    case 0:
+        break;
+    case 1:
         gen_exception_gpf(s);
         return s->pc;
+    case 2:
+        /* Restore state that may affect the next instruction. */
+        s->cc_op_dirty = orig_cc_op_dirty;
+        s->cc_op = orig_cc_op;
+        s->base.num_insns--;
+        tcg_remove_ops_after(s->prev_insn_end);
+        s->base.is_jmp = DISAS_TOO_MANY;
+        return pc_start;
+    default:
+        g_assert_not_reached();
     }
 
     prefixes = 0;
@@ -8745,6 +8767,7 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
+    dc->prev_insn_end = tcg_last_op();
     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
 }
 
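The tcg_last_op() call recorded here pairs with the case 2 rollback in
disas_insn(): insn_start remembers the last op emitted before the instruction,
and tcg_remove_ops_after() truncates the op list back to that point. The two
TCG helpers have roughly these shapes in include/tcg/tcg.h (approximate, for
orientation only):

    TCGOp *tcg_last_op(void);              /* last op emitted into the current TB */
    void tcg_remove_ops_after(TCGOp *op);  /* discard every op emitted after 'op' */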
@@ -8765,31 +8788,22 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 #endif
 
     pc_next = disas_insn(dc, cpu);
-
-    if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
-        /* if single step mode, we generate only one instruction and
-           generate an exception */
-        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
-           the flag and abort the translation to give the irqs a
-           chance to happen */
-        dc->base.is_jmp = DISAS_TOO_MANY;
-    } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
-               && ((pc_next & TARGET_PAGE_MASK)
-                   != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
-                       & TARGET_PAGE_MASK)
-                   || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
-        /* Do not cross the boundary of the pages in icount mode,
-           it can cause an exception. Do it only when boundary is
-           crossed by the first instruction in the block.
-           If current instruction already crossed the bound - it's ok,
-           because an exception hasn't stopped this code.
-         */
-        dc->base.is_jmp = DISAS_TOO_MANY;
-    } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
-        dc->base.is_jmp = DISAS_TOO_MANY;
-    }
-
     dc->base.pc_next = pc_next;
+
+    if (dc->base.is_jmp == DISAS_NEXT) {
+        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
+            /*
+             * If single step mode, we generate only one instruction and
+             * generate an exception.
+             * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
+             * the flag and abort the translation to give the irqs a
+             * chance to happen.
+             */
+            dc->base.is_jmp = DISAS_TOO_MANY;
+        } else if (!is_same_page(&dc->base, pc_next)) {
+            dc->base.is_jmp = DISAS_TOO_MANY;
+        }
+    }
 }
 
 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
tests/tcg/x86_64/Makefile.target

@@ -10,6 +10,7 @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target
 
 ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
 X86_64_TESTS += vsyscall
+X86_64_TESTS += noexec
 TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
 else
 TESTS=$(MULTIARCH_TESTS)
@@ -23,5 +24,5 @@ test-x86_64: LDFLAGS+=-lm -lc
 test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
 	$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
 
-vsyscall: $(SRC_PATH)/tests/tcg/x86_64/vsyscall.c
+%: $(SRC_PATH)/tests/tcg/x86_64/%.c
 	$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
tests/tcg/x86_64/noexec.c (new file)

@@ -0,0 +1,75 @@
+#include "../multiarch/noexec.c.inc"
+
+static void *arch_mcontext_pc(const mcontext_t *ctx)
+{
+    return (void *)ctx->gregs[REG_RIP];
+}
+
+int arch_mcontext_arg(const mcontext_t *ctx)
+{
+    return ctx->gregs[REG_RDI];
+}
+
+static void arch_flush(void *p, int len)
+{
+}
+
+extern char noexec_1[];
+extern char noexec_2[];
+extern char noexec_end[];
+
+asm("noexec_1:\n"
+    "    movq $1,%rdi\n"    /* %rdi is 0 on entry, set 1. */
+    "noexec_2:\n"
+    "    movq $2,%rdi\n"    /* %rdi is 0/1; set 2. */
+    "    ret\n"
+    "noexec_end:");
+
+int main(void)
+{
+    struct noexec_test noexec_tests[] = {
+        {
+            .name = "fallthrough",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2,
+            .entry_ofs = noexec_1 - noexec_2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = 0,
+            .expected_arg = 1,
+        },
+        {
+            .name = "jump",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2,
+            .entry_ofs = 0,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = 0,
+            .expected_arg = 0,
+        },
+        {
+            .name = "fallthrough [cross]",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2 - 2,
+            .entry_ofs = noexec_1 - noexec_2 - 2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = -2,
+            .expected_arg = 1,
+        },
+        {
+            .name = "jump [cross]",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2 - 2,
+            .entry_ofs = -2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = -2,
+            .expected_arg = 0,
+        },
+    };
+
+    return test_noexec(noexec_tests,
+                       sizeof(noexec_tests) / sizeof(noexec_tests[0]));
+}
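struct noexec_test and test_noexec() come from the shared harness pulled in by
the #include at the top (../multiarch/noexec.c.inc). Judging purely from the
usage above, its interface looks roughly like the sketch below; the field
types and comments are guesses, not the harness's actual declarations:

    struct noexec_test {
        const char *name;        /* test label */
        const char *test_code;   /* code copied next to a non-executable page */
        int test_len;            /* length of that code */
        int page_ofs;            /* placement of the code relative to the page boundary */
        int entry_ofs;           /* offset at which execution enters the code */
        int expected_si_ofs;     /* expected si_addr offset of the SIGSEGV */
        int expected_pc_ofs;     /* expected faulting-PC offset */
        int expected_arg;        /* expected %rdi value when the fault is taken */
    };

    int test_noexec(struct noexec_test *tests, size_t n_tests);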