tcg/i386: Do not expand cmpsel_vec early

Expand during output instead of during opcode generation.
Remove the x86_vpblendvb_vec opcode, as this removes its only user.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2024-09-06 20:32:38 -07:00
parent b8a567039a
commit db4121d207
4 changed files with 53 additions and 35 deletions

View File

@ -50,6 +50,7 @@ C_N1_I2(r, r, r)
C_N1_I2(r, r, rW) C_N1_I2(r, r, rW)
C_O1_I3(x, 0, x, x) C_O1_I3(x, 0, x, x)
C_O1_I3(x, x, x, x) C_O1_I3(x, x, x, x)
C_O1_I4(x, x, x, x, x)
C_O1_I4(r, r, reT, r, 0) C_O1_I4(r, r, reT, r, 0)
C_O1_I4(r, r, r, ri, ri) C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, L) C_O2_I1(r, r, L)

View File

@ -3115,6 +3115,19 @@ static void tcg_out_cmp_vec(TCGContext *s, TCGType type, unsigned vece,
} }
} }
/*
 * Emit code for cmpsel_vec: v0 = (c1 cond c2) ? v3 : v4.
 *
 * The comparison result is materialized into TCG_TMP_VEC, then used as
 * the byte-select mask for VPBLENDVB to pick between v3 and v4.
 */
static void tcg_out_cmpsel_vec(TCGContext *s, TCGType type, unsigned vece,
TCGReg v0, TCGReg c1, TCGReg c2,
TCGReg v3, TCGReg v4, TCGCond cond)
{
/*
 * tcg_out_cmp_vec_noinv emits the comparison without inverting the
 * result; its return value appears to signal that the condition it
 * actually emitted is the inverse of COND (NOTE(review): confirm
 * against its definition) — compensate by swapping the two select
 * operands instead of inverting the mask.
 */
if (tcg_out_cmp_vec_noinv(s, type, vece, TCG_TMP_VEC, c1, c2, cond)) {
TCGReg swap = v3;
v3 = v4;
v4 = swap;
}
/* VPBLENDVB v0, v4, v3, TCG_TMP_VEC: mask bytes select v3 over v4. */
tcg_out_vex_modrm_type(s, OPC_VPBLENDVB, v0, v4, v3, type);
/* The /is4 immediate encodes the mask register in bits [7:4]. */
tcg_out8(s, (TCG_TMP_VEC - TCG_REG_XMM0) << 4);
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece, unsigned vecl, unsigned vece,
const TCGArg args[TCG_MAX_OP_ARGS], const TCGArg args[TCG_MAX_OP_ARGS],
@ -3320,6 +3333,11 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_cmp_vec(s, type, vece, a0, a1, a2, args[3]); tcg_out_cmp_vec(s, type, vece, a0, a1, a2, args[3]);
break; break;
case INDEX_op_cmpsel_vec:
tcg_out_cmpsel_vec(s, type, vece, a0, a1, a2,
args[3], args[4], args[5]);
break;
case INDEX_op_andc_vec: case INDEX_op_andc_vec:
insn = OPC_PANDN; insn = OPC_PANDN;
tcg_out_vex_modrm_type(s, insn, a0, a2, a1, type); tcg_out_vex_modrm_type(s, insn, a0, a2, a1, type);
@ -3431,11 +3449,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out8(s, sub); tcg_out8(s, sub);
break; break;
case INDEX_op_x86_vpblendvb_vec:
tcg_out_vex_modrm_type(s, OPC_VPBLENDVB, a0, a1, a2, type);
tcg_out8(s, args[3] << 4);
break;
case INDEX_op_x86_psrldq_vec: case INDEX_op_x86_psrldq_vec:
tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1); tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
tcg_out8(s, a2); tcg_out8(s, a2);
@ -3701,8 +3714,9 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
return C_O1_I3(x, 0, x, x); return C_O1_I3(x, 0, x, x);
case INDEX_op_bitsel_vec: case INDEX_op_bitsel_vec:
case INDEX_op_x86_vpblendvb_vec:
return C_O1_I3(x, x, x, x); return C_O1_I3(x, x, x, x);
case INDEX_op_cmpsel_vec:
return C_O1_I4(x, x, x, x, x);
default: default:
g_assert_not_reached(); g_assert_not_reached();
@ -4038,8 +4052,8 @@ static void expand_vec_mul(TCGType type, unsigned vece,
} }
} }
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, static TCGCond expand_vec_cond(TCGType type, unsigned vece,
TCGv_vec v1, TCGv_vec v2, TCGCond cond) TCGArg *a1, TCGArg *a2, TCGCond cond)
{ {
/* /*
* Without AVX512, there are no 64-bit unsigned comparisons. * Without AVX512, there are no 64-bit unsigned comparisons.
@ -4047,46 +4061,50 @@ static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
* All other swapping and inversion are handled during code generation. * All other swapping and inversion are handled during code generation.
*/ */
if (vece == MO_64 && is_unsigned_cond(cond)) { if (vece == MO_64 && is_unsigned_cond(cond)) {
TCGv_vec v1 = temp_tcgv_vec(arg_temp(*a1));
TCGv_vec v2 = temp_tcgv_vec(arg_temp(*a2));
TCGv_vec t1 = tcg_temp_new_vec(type); TCGv_vec t1 = tcg_temp_new_vec(type);
TCGv_vec t2 = tcg_temp_new_vec(type); TCGv_vec t2 = tcg_temp_new_vec(type);
TCGv_vec t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1)); TCGv_vec t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1));
tcg_gen_sub_vec(vece, t1, v1, t3); tcg_gen_sub_vec(vece, t1, v1, t3);
tcg_gen_sub_vec(vece, t2, v2, t3); tcg_gen_sub_vec(vece, t2, v2, t3);
v1 = t1; *a1 = tcgv_vec_arg(t1);
v2 = t2; *a2 = tcgv_vec_arg(t2);
cond = tcg_signed_cond(cond); cond = tcg_signed_cond(cond);
} }
return cond;
/* Expand directly; do not recurse. */
vec_gen_4(INDEX_op_cmp_vec, type, vece,
tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
} }
static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0, static void expand_vec_cmp(TCGType type, unsigned vece, TCGArg a0,
TCGv_vec c1, TCGv_vec c2, TCGArg a1, TCGArg a2, TCGCond cond)
TCGv_vec v3, TCGv_vec v4, TCGCond cond)
{ {
TCGv_vec t = tcg_temp_new_vec(type); cond = expand_vec_cond(type, vece, &a1, &a2, cond);
/* Expand directly; do not recurse. */
vec_gen_4(INDEX_op_cmp_vec, type, vece, a0, a1, a2, cond);
}
expand_vec_cmp(type, vece, t, c1, c2, cond); static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGArg a0,
vec_gen_4(INDEX_op_x86_vpblendvb_vec, type, vece, TCGArg a1, TCGArg a2,
tcgv_vec_arg(v0), tcgv_vec_arg(v4), TCGArg a3, TCGArg a4, TCGCond cond)
tcgv_vec_arg(v3), tcgv_vec_arg(t)); {
tcg_temp_free_vec(t); cond = expand_vec_cond(type, vece, &a1, &a2, cond);
/* Expand directly; do not recurse. */
vec_gen_6(INDEX_op_cmpsel_vec, type, vece, a0, a1, a2, a3, a4, cond);
} }
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg a0, ...) TCGArg a0, ...)
{ {
va_list va; va_list va;
TCGArg a2; TCGArg a1, a2, a3, a4, a5;
TCGv_vec v0, v1, v2, v3, v4; TCGv_vec v0, v1, v2;
va_start(va, a0); va_start(va, a0);
v0 = temp_tcgv_vec(arg_temp(a0)); a1 = va_arg(va, TCGArg);
v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
a2 = va_arg(va, TCGArg); a2 = va_arg(va, TCGArg);
v0 = temp_tcgv_vec(arg_temp(a0));
v1 = temp_tcgv_vec(arg_temp(a1));
switch (opc) { switch (opc) {
case INDEX_op_shli_vec: case INDEX_op_shli_vec:
@ -4122,15 +4140,15 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
break; break;
case INDEX_op_cmp_vec: case INDEX_op_cmp_vec:
v2 = temp_tcgv_vec(arg_temp(a2)); a3 = va_arg(va, TCGArg);
expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); expand_vec_cmp(type, vece, a0, a1, a2, a3);
break; break;
case INDEX_op_cmpsel_vec: case INDEX_op_cmpsel_vec:
v2 = temp_tcgv_vec(arg_temp(a2)); a3 = va_arg(va, TCGArg);
v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); a4 = va_arg(va, TCGArg);
v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); a5 = va_arg(va, TCGArg);
expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg)); expand_vec_cmpsel(type, vece, a0, a1, a2, a3, a4, a5);
break; break;
default: default:

View File

@ -223,7 +223,7 @@ typedef enum {
#define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_sat_vec 1
#define TCG_TARGET_HAS_minmax_vec 1 #define TCG_TARGET_HAS_minmax_vec 1
#define TCG_TARGET_HAS_bitsel_vec have_avx512vl #define TCG_TARGET_HAS_bitsel_vec have_avx512vl
#define TCG_TARGET_HAS_cmpsel_vec -1 #define TCG_TARGET_HAS_cmpsel_vec 1
#define TCG_TARGET_HAS_tst_vec 0 #define TCG_TARGET_HAS_tst_vec 0
#define TCG_TARGET_deposit_i32_valid(ofs, len) \ #define TCG_TARGET_deposit_i32_valid(ofs, len) \

View File

@ -25,7 +25,6 @@
*/ */
DEF(x86_shufps_vec, 1, 2, 1, IMPLVEC) DEF(x86_shufps_vec, 1, 2, 1, IMPLVEC)
DEF(x86_vpblendvb_vec, 1, 3, 0, IMPLVEC)
DEF(x86_blend_vec, 1, 2, 1, IMPLVEC) DEF(x86_blend_vec, 1, 2, 1, IMPLVEC)
DEF(x86_packss_vec, 1, 2, 0, IMPLVEC) DEF(x86_packss_vec, 1, 2, 0, IMPLVEC)
DEF(x86_packus_vec, 1, 2, 0, IMPLVEC) DEF(x86_packus_vec, 1, 2, 0, IMPLVEC)