Replaced some AVX2 instructions with non-AVX2 equivalents when the CPU does not support AVX2

This commit is contained in:
Dr. Chat 2015-05-01 17:34:05 -05:00
parent cad6ca6148
commit 414e5b2d30
1 changed file with 510 additions and 150 deletions

View File

@ -3075,6 +3075,9 @@ EMITTER_OPCODE_TABLE(
EMITTER(MUL_I8, MATCH(I<OPCODE_MUL, I8<>, I8<>, I8<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// dest hi, dest low = src * edx
// TODO(justin): Find a way to shorten this has call
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant);
@ -3089,11 +3092,38 @@ EMITTER(MUL_I8, MATCH(I<OPCODE_MUL, I8<>, I8<>, I8<>>)) {
e.movzx(e.edx, i.src2);
e.mulx(e.edx, i.dest.reg().cvt32(), i.src1.reg().cvt32());
}
} else {
// x86 mul instruction
// EDX:EAX <- EAX * $1;
//e.DebugBreak();
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant);
e.mov(e.eax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.eax);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant);
e.mov(e.eax, i.src2);
e.mul(i.src1);
e.mov(i.dest, e.eax);
} else {
e.movzx(e.eax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.eax);
}
}
e.ReloadEDX();
}
};
EMITTER(MUL_I16, MATCH(I<OPCODE_MUL, I16<>, I16<>, I16<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// dest hi, dest low = src * edx
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant);
@ -3108,12 +3138,39 @@ EMITTER(MUL_I16, MATCH(I<OPCODE_MUL, I16<>, I16<>, I16<>>)) {
e.movzx(e.edx, i.src2);
e.mulx(e.edx, i.dest.reg().cvt32(), i.src1.reg().cvt32());
}
} else {
// x86 mul instruction
// EDX:EAX <- EAX * REG;
//e.DebugBreak();
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant);
e.mov(e.eax, i.src1.constant());
e.mul(i.src2);
e.mov(i.dest, e.eax);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant);
e.mov(e.eax, i.src2.constant());
e.mul(i.src1);
e.mov(i.dest, e.eax);
} else {
e.movzx(e.eax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.eax);
}
}
e.ReloadEDX();
}
};
EMITTER(MUL_I32, MATCH(I<OPCODE_MUL, I32<>, I32<>, I32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// dest hi, dest low = src * edx
// mulx: edx src, 1st op high half, 2nd op low half, 3rd op src2
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant);
@ -3128,12 +3185,41 @@ EMITTER(MUL_I32, MATCH(I<OPCODE_MUL, I32<>, I32<>, I32<>>)) {
e.mov(e.edx, i.src2);
e.mulx(e.edx, i.dest, i.src1);
}
} else {
// x86 mul instruction
// EDX:EAX < EAX * REG(op1);
//e.DebugBreak();
// is_constant AKA not a register
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src1.constant());
e.mul(i.src2);
e.mov(i.dest, e.eax);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src2.constant());
e.mul(i.src1);
e.mov(i.dest, e.eax);
} else {
e.mov(e.eax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.eax);
}
}
e.ReloadEDX();
}
};
EMITTER(MUL_I64, MATCH(I<OPCODE_MUL, I64<>, I64<>, I64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// dest hi, dest low = src * rdx
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// mulx: edx src, 1st op high half, 2nd op low half, 3rd op src2
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant);
@ -3148,6 +3234,30 @@ EMITTER(MUL_I64, MATCH(I<OPCODE_MUL, I64<>, I64<>, I64<>>)) {
e.mov(e.rdx, i.src2);
e.mulx(e.rdx, i.dest, i.src1);
}
} else {
// x86 mul instruction
// EDX:EAX < EAX * REG(op1);
//e.DebugBreak();
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant); // can't multiply 2 constants
e.mov(e.rax, i.src1.constant());
e.mul(i.src2);
e.mov(i.dest, e.rax);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant); // can't multiply 2 constants
e.mov(e.rax, i.src2.constant());
e.mul(i.src1);
e.mov(i.dest, e.rax);
} else {
e.mov(e.rax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.rax);
}
}
e.ReloadEDX();
}
};
@ -3194,10 +3304,38 @@ EMITTER_OPCODE_TABLE(
// ============================================================================
EMITTER(MUL_HI_I8, MATCH(I<OPCODE_MUL_HI, I8<>, I8<>, I8<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// dest hi, dest low = src * rdx
// mulx: edx src, 1st op high half, 2nd op low half, 3rd op src2
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
// TODO(justin): Find a way to shorten this has call
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// TODO(benvanik): place src1 in eax? still need to sign extend
e.movzx(e.edx, i.src1);
e.mulx(i.dest.reg().cvt32(), e.eax, i.src2.reg().cvt32());
} else {
// x86 mul instruction
// EDX:EAX < EAX * REG(op1);
// is_constant AKA not a register
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src1.constant());
e.mul(i.src2);
e.mov(i.dest, e.edx);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src2.constant());
e.mul(i.src1);
e.mov(i.dest, e.edx);
} else {
e.movzx(e.eax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.edx);
}
}
} else {
e.mov(e.al, i.src1);
if (i.src2.is_constant) {
@ -3214,9 +3352,34 @@ EMITTER(MUL_HI_I8, MATCH(I<OPCODE_MUL_HI, I8<>, I8<>, I8<>>)) {
EMITTER(MUL_HI_I16, MATCH(I<OPCODE_MUL_HI, I16<>, I16<>, I16<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
// TODO(justin): Find a way to shorten this has call
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// TODO(benvanik): place src1 in eax? still need to sign extend
e.movzx(e.edx, i.src1);
e.mulx(i.dest.reg().cvt32(), e.eax, i.src2.reg().cvt32());
} else {
// x86 mul instruction
// EDX:EAX < EAX * REG(op1);
// is_constant AKA not a register
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src1.constant());
e.mul(i.src2);
e.mov(i.dest, e.edx);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src2.constant());
e.mul(i.src1);
e.mov(i.dest, e.edx);
} else {
e.movzx(e.eax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.edx);
}
}
} else {
e.mov(e.ax, i.src1);
if (i.src2.is_constant) {
@ -3233,6 +3396,8 @@ EMITTER(MUL_HI_I16, MATCH(I<OPCODE_MUL_HI, I16<>, I16<>, I16<>>)) {
EMITTER(MUL_HI_I32, MATCH(I<OPCODE_MUL_HI, I32<>, I32<>, I32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
// TODO(justin): Find a way to shorten this has call
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// TODO(benvanik): place src1 in eax? still need to sign extend
e.mov(e.edx, i.src1);
if (i.src2.is_constant) {
@ -3241,6 +3406,29 @@ EMITTER(MUL_HI_I32, MATCH(I<OPCODE_MUL_HI, I32<>, I32<>, I32<>>)) {
} else {
e.mulx(i.dest, e.edx, i.src2);
}
} else {
// x86 mul instruction
// EDX:EAX < EAX * REG(op1);
// is_constant AKA not a register
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src1.constant());
e.mul(i.src2);
e.mov(i.dest, e.edx);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant); // can't multiply 2 constants
e.mov(e.eax, i.src2.constant());
e.mul(i.src1);
e.mov(i.dest, e.edx);
} else {
e.mov(e.eax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.edx);
}
}
} else {
e.mov(e.eax, i.src1);
if (i.src2.is_constant) {
@ -3257,6 +3445,8 @@ EMITTER(MUL_HI_I32, MATCH(I<OPCODE_MUL_HI, I32<>, I32<>, I32<>>)) {
EMITTER(MUL_HI_I64, MATCH(I<OPCODE_MUL_HI, I64<>, I64<>, I64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
// TODO(justin): Find a way to shorten this has call
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
// TODO(benvanik): place src1 in eax? still need to sign extend
e.mov(e.rdx, i.src1);
if (i.src2.is_constant) {
@ -3265,6 +3455,29 @@ EMITTER(MUL_HI_I64, MATCH(I<OPCODE_MUL_HI, I64<>, I64<>, I64<>>)) {
} else {
e.mulx(i.dest, e.rax, i.src2);
}
} else {
// x86 mul instruction
// EDX:EAX < EAX * REG(op1);
// is_constant AKA not a register
if (i.src1.is_constant) {
assert_true(!i.src2.is_constant); // can't multiply 2 constants
e.mov(e.rax, i.src1.constant());
e.mul(i.src2);
e.mov(i.dest, e.rdx);
} else if (i.src2.is_constant) {
assert_true(!i.src1.is_constant); // can't multiply 2 constants
e.mov(e.rax, i.src2.constant());
e.mul(i.src1);
e.mov(i.dest, e.rdx);
} else {
e.mov(e.rax, i.src1);
e.mul(i.src2);
e.mov(i.dest, e.rdx);
}
}
} else {
e.mov(e.rax, i.src1);
if (i.src2.is_constant) {
@ -3565,6 +3778,8 @@ EMITTER_OPCODE_TABLE(
// perhaps use other 132/213/etc
EMITTER(MUL_ADD_F32, MATCH(I<OPCODE_MUL_ADD, F32<>, F32<>, F32<>, F32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// FMA extension
if (e.cpu()->has(Xbyak::util::Cpu::tFMA)) {
if (i.dest == i.src1) {
e.vfmadd213ss(i.dest, i.src2, i.src3);
} else {
@ -3577,10 +3792,26 @@ EMITTER(MUL_ADD_F32, MATCH(I<OPCODE_MUL_ADD, F32<>, F32<>, F32<>, F32<>>)) {
e.vmovss(i.dest, e.xmm0);
}
}
} else {
// TODO(justin): Test this
//e.DebugBreak();
// If i.dest == i.src3, back up i.src3 so we don't overwrite it.
if (i.dest == i.src3) {
e.vmovss(e.xmm0, i.src3);
e.vmulss(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vaddss(i.dest, i.dest, e.xmm0); // $0 = $1 + $2
} else {
e.vmulss(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vaddss(i.dest, i.dest, i.src3); // $0 = $1 + $2
}
}
}
};
EMITTER(MUL_ADD_F64, MATCH(I<OPCODE_MUL_ADD, F64<>, F64<>, F64<>, F64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// FMA extension
if (e.cpu()->has(Xbyak::util::Cpu::tFMA)) {
if (i.dest == i.src1) {
e.vfmadd213sd(i.dest, i.src2, i.src3);
} else {
@ -3593,10 +3824,23 @@ EMITTER(MUL_ADD_F64, MATCH(I<OPCODE_MUL_ADD, F64<>, F64<>, F64<>, F64<>>)) {
e.vmovsd(i.dest, e.xmm0);
}
}
} else {
// If i.dest == i.src3, back up i.src3 so we don't overwrite it.
if (i.dest == i.src3) {
e.vmovsd(e.xmm0, i.src3);
e.vmulsd(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vaddsd(i.dest, i.dest, e.xmm0); // $0 = $1 + $2
} else {
e.vmulsd(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vaddsd(i.dest, i.dest, i.src3); // $0 = $1 + $2
}
}
}
};
EMITTER(MUL_ADD_V128, MATCH(I<OPCODE_MUL_ADD, V128<>, V128<>, V128<>, V128<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// FMA extension
if (e.cpu()->has(Xbyak::util::Cpu::tFMA)) {
if (i.dest == i.src1) {
e.vfmadd213ps(i.dest, i.src2, i.src3);
} else {
@ -3609,6 +3853,20 @@ EMITTER(MUL_ADD_V128, MATCH(I<OPCODE_MUL_ADD, V128<>, V128<>, V128<>, V128<>>))
e.vmovdqa(i.dest, e.xmm0);
}
}
} else {
// TODO(justin): Test this
//e.DebugBreak();
// If i.dest == i.src3, back up i.src3 so we don't overwrite it.
if (i.dest == i.src3) {
e.vmovdqa(e.xmm0, i.src3);
e.vmulps(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vaddps(i.dest, i.dest, e.xmm0); // $0 = $1 + $2
} else {
e.vmulps(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vaddps(i.dest, i.dest, i.src3); // $0 = $1 + $2
}
}
}
};
EMITTER_OPCODE_TABLE(
@ -3628,6 +3886,8 @@ EMITTER_OPCODE_TABLE(
// perhaps use other 132/213/etc
EMITTER(MUL_SUB_F32, MATCH(I<OPCODE_MUL_SUB, F32<>, F32<>, F32<>, F32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// FMA extension
if (e.cpu()->has(Xbyak::util::Cpu::tFMA)) {
if (i.dest == i.src1) {
e.vfmsub213ss(i.dest, i.src2, i.src3);
} else {
@ -3640,10 +3900,26 @@ EMITTER(MUL_SUB_F32, MATCH(I<OPCODE_MUL_SUB, F32<>, F32<>, F32<>, F32<>>)) {
e.vmovss(i.dest, e.xmm0);
}
}
} else {
// TODO(justin): Test this
//e.DebugBreak();
// If i.dest == i.src3, back up i.src3 so we don't overwrite it.
if (i.dest == i.src3) {
e.vmovss(e.xmm0, i.src3);
e.vmulss(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vsubss(i.dest, i.dest, e.xmm0); // $0 = $1 - $2
} else {
e.vmulss(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vsubss(i.dest, i.dest, i.src3); // $0 = $1 - $2
}
}
}
};
EMITTER(MUL_SUB_F64, MATCH(I<OPCODE_MUL_SUB, F64<>, F64<>, F64<>, F64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// FMA extension
if (e.cpu()->has(Xbyak::util::Cpu::tFMA)) {
if (i.dest == i.src1) {
e.vfmsub213sd(i.dest, i.src2, i.src3);
} else {
@ -3656,10 +3932,26 @@ EMITTER(MUL_SUB_F64, MATCH(I<OPCODE_MUL_SUB, F64<>, F64<>, F64<>, F64<>>)) {
e.vmovsd(i.dest, e.xmm0);
}
}
} else {
// TODO(justin): Test this
//e.DebugBreak();
// If i.dest == i.src3, back up i.src3 so we don't overwrite it.
if (i.dest == i.src3) {
e.vmovdqa(e.xmm0, i.src3);
e.vmulsd(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vsubsd(i.dest, i.dest, e.xmm0); // $0 = $1 - $2
} else {
e.vmulsd(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vsubsd(i.dest, i.dest, i.src3); // $0 = $1 - $2
}
}
}
};
EMITTER(MUL_SUB_V128, MATCH(I<OPCODE_MUL_SUB, V128<>, V128<>, V128<>, V128<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
// FMA extension
if (e.cpu()->has(Xbyak::util::Cpu::tFMA)) {
if (i.dest == i.src1) {
e.vfmsub213ps(i.dest, i.src2, i.src3);
} else {
@ -3672,6 +3964,20 @@ EMITTER(MUL_SUB_V128, MATCH(I<OPCODE_MUL_SUB, V128<>, V128<>, V128<>, V128<>>))
e.vmovdqa(i.dest, e.xmm0);
}
}
} else {
// TODO(justin): Test this
//e.DebugBreak();
// If i.dest == i.src3, back up i.src3 so we don't overwrite it.
if (i.dest == i.src3) {
e.vmovdqa(e.xmm0, i.src3);
e.vmulps(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vsubps(i.dest, i.dest, e.xmm0); // $0 = $1 - $2
} else {
e.vmulps(i.dest, i.src1, i.src2); // $0 = $1 * $2
e.vsubps(i.dest, i.dest, i.src3); // $0 = $1 - $2
}
}
}
};
EMITTER_OPCODE_TABLE(
@ -4160,11 +4466,23 @@ void EmitShlXX(X64Emitter& e, const ARGS& i) {
SEQ::EmitAssociativeBinaryOp(
e, i,
[](X64Emitter& e, const REG& dest_src, const Reg8& src) {
// shlx: $1 = $2 << $3
// shl: $1 = $1 << $2
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
if (dest_src.getBit() == 64) {
e.shlx(dest_src.cvt64(), dest_src.cvt64(), src.cvt64());
} else {
e.shlx(dest_src.cvt32(), dest_src.cvt32(), src.cvt32());
}
} else {
// back up ecx...
e.mov(e.al, e.cl);
e.mov(e.cl, src);
e.shl(dest_src, e.cl);
e.mov(e.cl, e.al);
}
}, [](X64Emitter& e, const REG& dest_src, int8_t constant) {
e.shl(dest_src, constant);
});
@ -4206,6 +4524,9 @@ void EmitShrXX(X64Emitter& e, const ARGS& i) {
SEQ::EmitAssociativeBinaryOp(
e, i,
[](X64Emitter& e, const REG& dest_src, const Reg8& src) {
// shrx: op1 dest, op2 src, op3 count
// shr: op1 src/dest, op2 count
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
if (dest_src.getBit() == 64) {
e.shrx(dest_src.cvt64(), dest_src.cvt64(), src.cvt64());
} else if (dest_src.getBit() == 32) {
@ -4214,6 +4535,15 @@ void EmitShrXX(X64Emitter& e, const ARGS& i) {
e.movzx(dest_src.cvt32(), dest_src);
e.shrx(dest_src.cvt32(), dest_src.cvt32(), src.cvt32());
}
} else {
// back up ecx...
e.mov(e.al, e.cl);
e.mov(e.cl, src);
e.shr(dest_src, e.cl);
e.mov(e.cl, e.al);
}
}, [](X64Emitter& e, const REG& dest_src, int8_t constant) {
e.shr(dest_src, constant);
});
@ -5015,8 +5345,10 @@ EMITTER_OPCODE_TABLE(
// ============================================================================
// OPCODE_SPLAT
// ============================================================================
// Copy a value into all elements of a vector
EMITTER(SPLAT_I8, MATCH(I<OPCODE_SPLAT, V128<>, I8<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
if (i.src1.is_constant) {
// TODO(benvanik): faster constant splats.
e.mov(e.al, i.src1.constant());
@ -5026,10 +5358,24 @@ EMITTER(SPLAT_I8, MATCH(I<OPCODE_SPLAT, V128<>, I8<>>)) {
e.vmovd(e.xmm0, i.src1.reg().cvt32());
e.vpbroadcastb(i.dest, e.xmm0);
}
} else {
// TODO(justin): Test this (is this proper behavior?)
//e.DebugBreak();
if (i.src1.is_constant) {
e.mov(e.eax, i.src1.constant());
e.vmovd(e.xmm0, e.eax);
e.vshufps(i.dest, e.xmm0, e.xmm0, 0);
} else {
e.vmovd(e.xmm0, i.src1.reg().cvt32());
e.vshufps(i.dest, e.xmm0, e.xmm0, 0);
}
}
}
};
EMITTER(SPLAT_I16, MATCH(I<OPCODE_SPLAT, V128<>, I16<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
if (i.src1.is_constant) {
// TODO(benvanik): faster constant splats.
e.mov(e.ax, i.src1.constant());
@ -5039,10 +5385,15 @@ EMITTER(SPLAT_I16, MATCH(I<OPCODE_SPLAT, V128<>, I16<>>)) {
e.vmovd(e.xmm0, i.src1.reg().cvt32());
e.vpbroadcastw(i.dest, e.xmm0);
}
} else {
// TODO(justin)
e.DebugBreak();
}
}
};
EMITTER(SPLAT_I32, MATCH(I<OPCODE_SPLAT, V128<>, I32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
if (i.src1.is_constant) {
// TODO(benvanik): faster constant splats.
e.mov(e.eax, i.src1.constant());
@ -5052,10 +5403,15 @@ EMITTER(SPLAT_I32, MATCH(I<OPCODE_SPLAT, V128<>, I32<>>)) {
e.vmovd(e.xmm0, i.src1);
e.vpbroadcastd(i.dest, e.xmm0);
}
} else {
// TODO(justin)
e.DebugBreak();
}
}
};
EMITTER(SPLAT_F32, MATCH(I<OPCODE_SPLAT, V128<>, F32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (e.cpu()->has(Xbyak::util::Cpu::tAVX2)) {
if (i.src1.is_constant) {
// TODO(benvanik): faster constant splats.
e.mov(e.eax, i.src1.value->constant.i32);
@ -5064,6 +5420,10 @@ EMITTER(SPLAT_F32, MATCH(I<OPCODE_SPLAT, V128<>, F32<>>)) {
} else {
e.vbroadcastss(i.dest, i.src1);
}
} else {
// TODO(justin)
e.DebugBreak();
}
}
};
EMITTER_OPCODE_TABLE(