target/i386: group common checks in the decoding phase

In preparation for adding more similar checks, move the VEX.L=0 check
and several X86_SPECIAL_* checks to a new field, where each bit represents
a common check on unused bits, or a restriction on the processor mode.

Likewise, many SVM intercepts can be checked during the decoding phase,
the main exceptions being the selective CR0 write, MSR and IOIO intercepts.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Paolo Bonzini 2023-10-09 17:43:12 +02:00
parent 48adb24049
commit 183e6679e3
3 changed files with 85 additions and 37 deletions

View File

@ -90,8 +90,6 @@
X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
#define cpuid(feat) .cpuid = X86_FEAT_##feat,
#define i64 .special = X86_SPECIAL_i64,
#define o64 .special = X86_SPECIAL_o64,
#define xchg .special = X86_SPECIAL_Locked,
#define mmx .special = X86_SPECIAL_MMX,
#define zext0 .special = X86_SPECIAL_ZExtOp0,
@ -114,6 +112,9 @@
#define vex12 .vex_class = 12,
#define vex13 .vex_class = 13,
#define chk(a) .check = X86_CHECK_##a,
#define svm(a) .intercept = SVM_EXIT_##a,
#define avx2_256 .vex_special = X86_VEX_AVX2_256,
#define P_00 1
@ -161,8 +162,8 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
};
static const X86OpEntry group15_mem[8] = {
[2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5),
[3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5),
[2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)),
[3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)),
};
uint8_t modrm = get_modrm(s, env);
@ -1590,6 +1591,12 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode)
if (s->flags & HF_EM_MASK) {
goto illegal;
}
if (e->check & X86_CHECK_VEX128) {
if (s->vex_l) {
goto illegal;
}
}
return true;
nm_exception:
@ -1775,6 +1782,25 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
goto illegal_op;
}
/* Checks that result in #UD come first. */
if (decode.e.check) {
if (decode.e.check & X86_CHECK_i64) {
if (CODE64(s)) {
goto illegal_op;
}
}
if (decode.e.check & X86_CHECK_o64) {
if (!CODE64(s)) {
goto illegal_op;
}
}
if (decode.e.check & X86_CHECK_prot) {
if (!PE(s) || VM86(s)) {
goto illegal_op;
}
}
}
switch (decode.e.special) {
case X86_SPECIAL_None:
break;
@ -1785,23 +1811,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
}
break;
case X86_SPECIAL_ProtMode:
if (!PE(s) || VM86(s)) {
goto illegal_op;
}
break;
case X86_SPECIAL_i64:
if (CODE64(s)) {
goto illegal_op;
}
break;
case X86_SPECIAL_o64:
if (!CODE64(s)) {
goto illegal_op;
}
break;
case X86_SPECIAL_ZExtOp0:
assert(decode.op[0].unit == X86_OP_INT);
if (!decode.op[0].has_ea) {
@ -1831,6 +1840,37 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
if (!validate_vex(s, &decode)) {
return;
}
/*
* Checks that result in #GP or VMEXIT come second. Intercepts are
* generally checked after non-memory exceptions (i.e. before all
* exceptions if there is no memory operand). Exceptions are
* vm86 checks (INTn, IRET, PUSHF/POPF), RSM and XSETBV (!).
*
* RSM and XSETBV will be handled in the gen_* functions
* instead of using chk().
*/
if (decode.e.check & X86_CHECK_cpl0) {
if (CPL(s) != 0) {
goto gp_fault;
}
}
if (decode.e.intercept && unlikely(GUEST(s))) {
gen_helper_svm_check_intercept(tcg_env,
tcg_constant_i32(decode.e.intercept));
}
if (decode.e.check) {
if ((decode.e.check & X86_CHECK_vm86_iopl) && VM86(s)) {
if (IOPL(s) < 3) {
goto gp_fault;
}
} else if (decode.e.check & X86_CHECK_cpl_iopl) {
if (IOPL(s) < CPL(s)) {
goto gp_fault;
}
}
}
if (decode.e.special == X86_SPECIAL_MMX &&
!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
gen_helper_enter_mmx(tcg_env);
@ -1857,6 +1897,9 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
gen_writeback(s, &decode, 0, s->T0);
}
return;
gp_fault:
gen_exception_gpf(s);
return;
illegal_op:
gen_illegal_opcode(s);
return;

View File

@ -131,15 +131,30 @@ typedef enum X86OpUnit {
X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */
} X86OpUnit;
/*
 * Per-instruction checks performed once during the decoding phase.
 * Each bit is a common restriction on unused encoding bits or on the
 * processor mode; multiple bits can be OR'ed together in the table
 * entry's .check field via the chk() macro.
 */
typedef enum X86InsnCheck {
/* Illegal or exclusive to 64-bit mode */
X86_CHECK_i64 = 1,
X86_CHECK_o64 = 2,
/* Fault outside protected mode */
X86_CHECK_prot = 4,
/* Privileged instruction checks */
X86_CHECK_cpl0 = 8,
X86_CHECK_vm86_iopl = 16,
X86_CHECK_cpl_iopl = 32,
/* Convenience combination: either IOPL check depending on vm86 mode */
X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl,
/* Fault if VEX.L=1 */
X86_CHECK_VEX128 = 64,
} X86InsnCheck;
typedef enum X86InsnSpecial {
X86_SPECIAL_None,
/* Always locked if it has a memory operand (XCHG) */
X86_SPECIAL_Locked,
/* Fault outside protected mode */
X86_SPECIAL_ProtMode,
/*
* Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw
* in the manual.
@ -158,10 +173,6 @@ typedef enum X86InsnSpecial {
* become P/P/Q/N, and size "x" becomes "q".
*/
X86_SPECIAL_MMX,
/* Illegal or exclusive to 64-bit mode */
X86_SPECIAL_i64,
X86_SPECIAL_o64,
} X86InsnSpecial;
/*
@ -224,7 +235,9 @@ struct X86OpEntry {
X86CPUIDFeature cpuid:8;
unsigned vex_class:8;
X86VEXSpecial vex_special:8;
uint16_t valid_prefix:16;
unsigned valid_prefix:16;
unsigned check:16;
unsigned intercept:8;
bool is_decode:1;
};

View File

@ -1236,10 +1236,6 @@ static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec
/*
 * Emit TCG code for LDMXCSR: load the MXCSR register from the 32-bit
 * source operand (already in s->T1).  The helper performs the actual
 * update so that reserved-bit faults are raised at run time.
 */
static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
if (s->vex_l) {
gen_illegal_opcode(s);
return;
}
/* MXCSR is 32 bits wide; truncate the target-long source value. */
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1);
gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
}
@ -1886,10 +1882,6 @@ static void gen_VAESKEYGEN(DisasContext *s, CPUX86State *env, X86DecodedInsn *de
/*
 * Emit TCG code for STMXCSR: store the current MXCSR value into the
 * destination operand (left in s->T0 for the common writeback path).
 * update_mxcsr first folds any pending status flags into env->mxcsr.
 */
static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
if (s->vex_l) {
gen_illegal_opcode(s);
return;
}
gen_helper_update_mxcsr(tcg_env);
/* Zero-extend the 32-bit MXCSR into the target-long result register. */
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
}