target/i386: Assert PE is set for user-only
A user-mode executable is never in real-mode.  Since we're adding
an accessor macro, pull the value directly out of flags for sysemu.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210514151342.384376-7-richard.henderson@linaro.org>
commit d75f912927
parent ca7874c2fa
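For context, the change hinges on an accessor macro that folds to a compile-time constant in user-only builds, so the compiler can discard the real-mode branches it guards. Below is a minimal standalone sketch of that pattern, not QEMU code: the Ctx struct, the HF_PE_MASK_DEMO value, and translate_one() are illustrative stand-ins; only the shape of PE(S) mirrors the patch.

/* Minimal sketch (not QEMU code): a guarded accessor macro that folds to a
 * constant under CONFIG_USER_ONLY, making real-mode branches dead code.
 * Ctx, HF_PE_MASK_DEMO and translate_one() are illustrative stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define HF_PE_MASK_DEMO (1u << 7)   /* stand-in for the real HF_PE_MASK */

typedef struct {
    unsigned flags;                 /* stand-in for DisasContext::flags */
} Ctx;

#ifdef CONFIG_USER_ONLY
/* User-mode code always runs in protected mode, so PE is constant. */
#define PE(S)   true
#else
#define PE(S)   (((S)->flags & HF_PE_MASK_DEMO) != 0)
#endif

static void translate_one(const Ctx *s)
{
    if (!PE(s)) {
        /* Under CONFIG_USER_ONLY this branch is provably dead and elided. */
        puts("real mode path");
    } else {
        puts("protected mode path");
    }
}

int main(void)
{
    Ctx s = { .flags = HF_PE_MASK_DEMO };  /* pretend PE is set */
    translate_one(&s);
    return 0;
}

Compile with -DCONFIG_USER_ONLY to see the constant-folded behavior; without it, PE() reads the flag at run time, which is the sysemu path the commit message refers to.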
target/i386/translate.c

@@ -94,7 +94,6 @@ typedef struct DisasContext {
     target_ulong pc; /* pc = eip + cs_base */
     /* current block context */
     target_ulong cs_base; /* base of CS segment */
-    int pe;     /* protected mode */
     int code32; /* 32 bit code segment */
 #ifdef TARGET_X86_64
     int lma;    /* long mode active */
@@ -146,6 +145,13 @@ typedef struct DisasContext {
     sigjmp_buf jmpbuf;
 } DisasContext;
 
+/* The environment in which user-only runs is constrained. */
+#ifdef CONFIG_USER_ONLY
+#define PE(S)   true
+#else
+#define PE(S)   (((S)->flags & HF_PE_MASK) != 0)
+#endif
+
 static void gen_eob(DisasContext *s);
 static void gen_jr(DisasContext *s, TCGv dest);
 static void gen_jmp(DisasContext *s, target_ulong eip);
@@ -617,7 +623,7 @@ static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip,
 {
     target_ulong next_eip;
 
-    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+    if (PE(s) && (s->cpl > s->iopl || s->vm86)) {
         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
         switch (ot) {
         case MO_8:
@@ -2345,7 +2351,7 @@ static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
    call this function with seg_reg == R_CS */
 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
 {
-    if (s->pe && !s->vm86) {
+    if (PE(s) && !s->vm86) {
         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
         gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32);
         /* abort translation because the addseg value may change or
@@ -5108,7 +5114,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_add_A0_im(s, 1 << ot);
             gen_op_ld_v(s, MO_16, s->T0, s->A0);
         do_lcall:
-            if (s->pe && !s->vm86) {
+            if (PE(s) && !s->vm86) {
                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                 gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
                                            tcg_const_i32(dflag - 1),
@@ -5138,7 +5144,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_add_A0_im(s, 1 << ot);
             gen_op_ld_v(s, MO_16, s->T0, s->A0);
         do_ljmp:
-            if (s->pe && !s->vm86) {
+            if (PE(s) && !s->vm86) {
                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                 gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
                                           tcg_const_tl(s->pc - s->cs_base));
@@ -6571,7 +6577,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     case 0xca: /* lret im */
         val = x86_ldsw_code(env, s);
     do_lret:
-        if (s->pe && !s->vm86) {
+        if (PE(s) && !s->vm86) {
             gen_update_cc_op(s);
             gen_jmp_im(s, pc_start - s->cs_base);
             gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
@@ -6597,7 +6603,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         goto do_lret;
     case 0xcf: /* iret */
         gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
-        if (!s->pe || s->vm86) {
+        if (!PE(s) || s->vm86) {
             /* real mode or vm86 mode */
             if (!check_vm86_iopl(s)) {
                 break;
@@ -7236,7 +7242,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         /* For Intel SYSENTER is valid on 64-bit */
         if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
             goto illegal_op;
-        if (!s->pe) {
+        if (!PE(s)) {
             gen_exception_gpf(s);
         } else {
             gen_helper_sysenter(cpu_env);
@@ -7247,7 +7253,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         /* For Intel SYSEXIT is valid on 64-bit */
         if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
             goto illegal_op;
-        if (!s->pe) {
+        if (!PE(s)) {
             gen_exception_gpf(s);
         } else {
             gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
@@ -7266,7 +7272,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         gen_eob_worker(s, false, true);
         break;
     case 0x107: /* sysret */
-        if (!s->pe) {
+        if (!PE(s)) {
             gen_exception_gpf(s);
         } else {
             gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
@@ -7301,7 +7307,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         op = (modrm >> 3) & 7;
         switch(op) {
         case 0: /* sldt */
-            if (!s->pe || s->vm86)
+            if (!PE(s) || s->vm86)
                 goto illegal_op;
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
             tcg_gen_ld32u_tl(s->T0, cpu_env,
@@ -7310,7 +7316,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
             break;
         case 2: /* lldt */
-            if (!s->pe || s->vm86)
+            if (!PE(s) || s->vm86)
                 goto illegal_op;
             if (check_cpl0(s)) {
                 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
@@ -7320,7 +7326,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             }
             break;
         case 1: /* str */
-            if (!s->pe || s->vm86)
+            if (!PE(s) || s->vm86)
                 goto illegal_op;
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
             tcg_gen_ld32u_tl(s->T0, cpu_env,
@@ -7329,7 +7335,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
             break;
         case 3: /* ltr */
-            if (!s->pe || s->vm86)
+            if (!PE(s) || s->vm86)
                 goto illegal_op;
             if (check_cpl0(s)) {
                 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
@@ -7340,7 +7346,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             break;
         case 4: /* verr */
         case 5: /* verw */
-            if (!s->pe || s->vm86)
+            if (!PE(s) || s->vm86)
                 goto illegal_op;
             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
             gen_update_cc_op(s);
@@ -7458,7 +7464,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             break;
 
         case 0xd8: /* VMRUN */
-            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+            if (!(s->flags & HF_SVME_MASK) || !PE(s)) {
                 goto illegal_op;
             }
             if (!check_cpl0(s)) {
@@ -7482,7 +7488,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             break;
 
         case 0xda: /* VMLOAD */
-            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+            if (!(s->flags & HF_SVME_MASK) || !PE(s)) {
                 goto illegal_op;
             }
             if (!check_cpl0(s)) {
@@ -7494,7 +7500,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             break;
 
         case 0xdb: /* VMSAVE */
-            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+            if (!(s->flags & HF_SVME_MASK) || !PE(s)) {
                 goto illegal_op;
             }
             if (!check_cpl0(s)) {
@@ -7508,7 +7514,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         case 0xdc: /* STGI */
             if ((!(s->flags & HF_SVME_MASK)
                    && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
-                || !s->pe) {
+                || !PE(s)) {
                 goto illegal_op;
             }
             if (!check_cpl0(s)) {
@@ -7521,7 +7527,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             break;
 
         case 0xdd: /* CLGI */
-            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+            if (!(s->flags & HF_SVME_MASK) || !PE(s)) {
                 goto illegal_op;
             }
             if (!check_cpl0(s)) {
@@ -7535,7 +7541,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         case 0xde: /* SKINIT */
             if ((!(s->flags & HF_SVME_MASK)
                    && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
-                || !s->pe) {
+                || !PE(s)) {
                 goto illegal_op;
             }
             gen_update_cc_op(s);
@@ -7544,7 +7550,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             break;
 
         case 0xdf: /* INVLPGA */
-            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+            if (!(s->flags & HF_SVME_MASK) || !PE(s)) {
                 goto illegal_op;
             }
             if (!check_cpl0(s)) {
@@ -7711,7 +7717,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
             TCGLabel *label1;
             TCGv t0, t1, t2, a0;
 
-            if (!s->pe || s->vm86)
+            if (!PE(s) || s->vm86)
                 goto illegal_op;
             t0 = tcg_temp_local_new();
             t1 = tcg_temp_local_new();
@@ -7759,7 +7765,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         {
             TCGLabel *label1;
             TCGv t0;
-            if (!s->pe || s->vm86)
+            if (!PE(s) || s->vm86)
                 goto illegal_op;
             ot = dflag != MO_16 ? MO_32 : MO_16;
             modrm = x86_ldub_code(env, s);
@@ -8461,9 +8467,13 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUX86State *env = cpu->env_ptr;
     uint32_t flags = dc->base.tb->flags;
-    target_ulong cs_base = dc->base.tb->cs_base;
 
-    dc->pe = (flags >> HF_PE_SHIFT) & 1;
+    dc->cs_base = dc->base.tb->cs_base;
+    dc->flags = flags;
+
+    /* We make some simplifying assumptions; validate they're correct. */
+    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
+
     dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
     dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
     dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
@@ -8474,7 +8484,6 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     dc->tf = (flags >> TF_SHIFT) & 1;
     dc->cc_op = CC_OP_DYNAMIC;
     dc->cc_op_dirty = false;
-    dc->cs_base = cs_base;
     dc->popl_esp_hack = 0;
     /* select memory access functions */
     dc->mem_index = 0;
@@ -8491,7 +8500,6 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     dc->lma = (flags >> HF_LMA_SHIFT) & 1;
     dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
 #endif
-    dc->flags = flags;
     dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled ||
                     (flags & HF_INHIBIT_IRQ_MASK));
     /* Do not optimize repz jumps at all in icount mode, because
@@ -8505,11 +8513,6 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
        additional step for ecx=0 when icount is enabled.
      */
     dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
-#if 0
-    /* check addseg logic */
-    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
-        printf("ERROR addseg\n");
-#endif
 
     dc->T0 = tcg_temp_new();
     dc->T1 = tcg_temp_new();