diff --git a/src/alloy/backend/x64/lowering/lowering_sequences.cc b/src/alloy/backend/x64/lowering/lowering_sequences.cc index 564f66bf3..c2969be3f 100644 --- a/src/alloy/backend/x64/lowering/lowering_sequences.cc +++ b/src/alloy/backend/x64/lowering/lowering_sequences.cc @@ -154,564 +154,13 @@ void IssueCallIndirect(X64Emitter& e, Value* target, uint32_t flags) { } } -// Sets EFLAGs with zf for the given value. -// ZF = 1 if false, 0 = true (so jz = jump if false) -void CheckBoolean(X64Emitter& e, Value* v) { - if (v->IsConstant()) { - e.mov(e.ah, (v->IsConstantZero() ? 1 : 0) << 6); - e.sahf(); - } else if (v->type == INT8_TYPE) { - Reg8 src; - e.BeginOp(v, src, 0); - e.test(src, src); - e.EndOp(src); - } else if (v->type == INT16_TYPE) { - Reg16 src; - e.BeginOp(v, src, 0); - e.test(src, src); - e.EndOp(src); - } else if (v->type == INT32_TYPE) { - Reg32 src; - e.BeginOp(v, src, 0); - e.test(src, src); - e.EndOp(src); - } else if (v->type == INT64_TYPE) { - Reg64 src; - e.BeginOp(v, src, 0); - e.test(src, src); - e.EndOp(src); - } else if (v->type == FLOAT32_TYPE) { - UNIMPLEMENTED_SEQ(); - } else if (v->type == FLOAT64_TYPE) { - UNIMPLEMENTED_SEQ(); - } else if (v->type == VEC128_TYPE) { - UNIMPLEMENTED_SEQ(); - } else { - ASSERT_INVALID_TYPE(); - } -} - -void CompareXX(X64Emitter& e, Instr*& i, void(set_fn)(X64Emitter& e, Reg8& dest, bool invert)) { - if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8)) { - Reg8 dest; - Reg8 src1, src2; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src2.value, src2, 0); - e.cmp(src1, src2); - set_fn(e, dest, false); - e.EndOp(dest, src1, src2); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8C)) { - Reg8 dest; - Reg8 src1; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0); - e.cmp(src1, i->src2.value->constant.i8); - set_fn(e, dest, false); - e.EndOp(dest, src1); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8C, SIG_TYPE_I8)) { - Reg8 dest; - Reg8 src2; - e.BeginOp(i->dest, dest, REG_DEST, - i->src2.value, src2, 0); - e.cmp(src2, i->src1.value->constant.i8); - set_fn(e, dest, true); - e.EndOp(dest, src2); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16)) { - Reg8 dest; - Reg16 src1, src2; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src2.value, src2, 0); - e.cmp(src1, src2); - set_fn(e, dest, false); - e.EndOp(dest, src1, src2); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16C)) { - Reg8 dest; - Reg16 src1; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0); - e.cmp(src1, i->src2.value->constant.i16); - set_fn(e, dest, false); - e.EndOp(dest, src1); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16C, SIG_TYPE_I16)) { - Reg8 dest; - Reg16 src2; - e.BeginOp(i->dest, dest, REG_DEST, - i->src2.value, src2, 0); - e.cmp(src2, i->src1.value->constant.i16); - e.sete(dest); - set_fn(e, dest, true); - e.EndOp(dest, src2); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32)) { - Reg8 dest; - Reg32 src1, src2; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src2.value, src2, 0); - e.cmp(src1, src2); - set_fn(e, dest, false); - e.EndOp(dest, src1, src2); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32C)) { - Reg8 dest; - Reg32 src1; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0); - e.cmp(src1, i->src2.value->constant.i32); - set_fn(e, dest, false); - e.EndOp(dest, src1); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32C, SIG_TYPE_I32)) { - Reg8 dest; - Reg32 src2; - 
e.BeginOp(i->dest, dest, REG_DEST, - i->src2.value, src2, 0); - e.cmp(src2, i->src1.value->constant.i32); - set_fn(e, dest, true); - e.EndOp(dest, src2); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64)) { - Reg8 dest; - Reg64 src1, src2; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src2.value, src2, 0); - e.cmp(src1, src2); - set_fn(e, dest, false); - e.EndOp(dest, src1, src2); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64C)) { - Reg8 dest; - Reg64 src1; - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0); - e.mov(e.rax, i->src2.value->constant.i64); - e.cmp(src1, e.rax); - set_fn(e, dest, false); - e.EndOp(dest, src1); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64C, SIG_TYPE_I64)) { - Reg8 dest; - Reg64 src2; - e.BeginOp(i->dest, dest, REG_DEST, - i->src2.value, src2, 0); - e.mov(e.rax, i->src1.value->constant.i64); - e.cmp(src2, e.rax); - set_fn(e, dest, true); - e.EndOp(dest, src2); - } else { - UNIMPLEMENTED_SEQ(); - } -}; - -typedef void(v_fn)(X64Emitter& e, Instr& i, const Reg& dest_src); -template -void IntUnaryOpV(X64Emitter& e, Instr*& i, v_fn v_fn, - T& dest, T& src1) { - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0); - if (dest == src1) { - v_fn(e, *i, dest); - } else { - e.mov(dest, src1); - v_fn(e, *i, dest); - } - e.EndOp(dest, src1); -} -template -void IntUnaryOpC(X64Emitter& e, Instr*& i, v_fn v_fn, - T& dest, Value* src1) { - e.BeginOp(i->dest, dest, REG_DEST); - e.mov(dest, (uint64_t)src1->get_constant(CT())); - v_fn(e, *i, dest); - e.EndOp(dest); -} -void IntUnaryOp(X64Emitter& e, Instr*& i, v_fn v_fn) { - if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8)) { - Reg8 dest, src1; - IntUnaryOpV(e, i, v_fn, dest, src1); - } else if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8C)) { - Reg8 dest; - IntUnaryOpC(e, i, v_fn, dest, i->src1.value); - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16)) { - Reg16 dest, src1; - IntUnaryOpV(e, i, v_fn, dest, src1); - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16C)) { - Reg16 dest; - IntUnaryOpC(e, i, v_fn, dest, i->src1.value); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32)) { - Reg32 dest, src1; - IntUnaryOpV(e, i, v_fn, dest, src1); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32C)) { - Reg32 dest; - IntUnaryOpC(e, i, v_fn, dest, i->src1.value); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64)) { - Reg64 dest, src1; - IntUnaryOpV(e, i, v_fn, dest, src1); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64C)) { - Reg64 dest; - IntUnaryOpC(e, i, v_fn, dest, i->src1.value); - } else { - ASSERT_INVALID_TYPE(); - } - if (i->flags & ARITHMETIC_SET_CARRY) { - // EFLAGS should have CA set? - // (so long as we don't fuck with it) - // UNIMPLEMENTED_SEQ(); - } -}; - -typedef void(vv_fn)(X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src); -typedef void(vc_fn)(X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src); -template -void IntBinaryOpVV(X64Emitter& e, Instr*& i, vv_fn vv_fn, - TD& dest, TS1& src1, TS2& src2) { - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src2.value, src2, 0); - if (dest == src1) { - vv_fn(e, *i, dest, src2); - } else if (dest == src2) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - vv_fn(e, *i, dest, src1); - } else { - // Eww. 
- e.mov(e.rax, src1); - vv_fn(e, *i, e.rax, src2); - e.mov(dest, e.rax); - } - } else { - e.mov(dest, src1); - vv_fn(e, *i, dest, src2); - } - e.EndOp(dest, src1, src2); -} -template -void IntBinaryOpVC(X64Emitter& e, Instr*& i, vv_fn vv_fn, vc_fn vc_fn, - TD& dest, TS1& src1, Value* src2) { - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0); - if (dest.getBit() <= 32) { - // 32-bit. - if (dest == src1) { - vc_fn(e, *i, dest, (uint32_t)src2->get_constant(CT())); - } else { - e.mov(dest, src1); - vc_fn(e, *i, dest, (uint32_t)src2->get_constant(CT())); - } - } else { - // 64-bit. - if (dest == src1) { - e.mov(e.rax, src2->constant.i64); - vv_fn(e, *i, dest, e.rax); - } else { - e.mov(e.rax, src2->constant.i64); - e.mov(dest, src1); - vv_fn(e, *i, dest, e.rax); - } - } - e.EndOp(dest, src1); -} -template -void IntBinaryOpCV(X64Emitter& e, Instr*& i, vv_fn vv_fn, vc_fn vc_fn, - TD& dest, Value* src1, TS2& src2) { - e.BeginOp(i->dest, dest, REG_DEST, - i->src2.value, src2, 0); - if (dest.getBit() <= 32) { - // 32-bit. - if (dest == src2) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - vc_fn(e, *i, dest, (uint32_t)src1->get_constant(CT())); - } else { - // Eww. - e.mov(e.rax, src2); - e.mov(dest, (uint32_t)src1->get_constant(CT())); - vv_fn(e, *i, dest, e.rax); - } - } else { - e.mov(dest, src2); - vc_fn(e, *i, dest, (uint32_t)src1->get_constant(CT())); - } - } else { - // 64-bit. - if (dest == src2) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - e.mov(e.rax, src1->constant.i64); - vv_fn(e, *i, dest, e.rax); - } else { - // Eww. - e.mov(e.rax, src1->constant.i64); - vv_fn(e, *i, e.rax, src2); - e.mov(dest, e.rax); - } - } else { - e.mov(e.rax, src2); - e.mov(dest, src1->constant.i64); - vv_fn(e, *i, dest, e.rax); - } - } - e.EndOp(dest, src2); -} -void IntBinaryOp(X64Emitter& e, Instr*& i, vv_fn vv_fn, vc_fn vc_fn) { - // TODO(benvanik): table lookup. This linear scan is slow. - // Note: we assume DEST.type = SRC1.type, but that SRC2.type may vary. 
- XEASSERT(i->dest->type == i->src1.value->type); - if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8)) { - Reg8 dest, src1, src2; - IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); - } else if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8C)) { - Reg8 dest, src1; - IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); - } else if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8C, SIG_TYPE_I8)) { - Reg8 dest, src2; - IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I16)) { - Reg16 dest, src1, src2; - IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I16C)) { - Reg16 dest, src1; - IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16C, SIG_TYPE_I16)) { - Reg16 dest, src2; - IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I32)) { - Reg32 dest, src1, src2; - IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I32C)) { - Reg32 dest, src1; - IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32C, SIG_TYPE_I32)) { - Reg32 dest, src2; - IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I64)) { - Reg64 dest, src1, src2; - IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I64C)) { - Reg64 dest, src1; - IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64C, SIG_TYPE_I64)) { - Reg64 dest, src2; - IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); - // Start forced src2=i8 - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8)) { - Reg16 dest, src1; - Reg8 src2; - IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8C)) { - Reg16 dest, src1; - IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); - } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16C, SIG_TYPE_I8)) { - Reg16 dest; - Reg8 src2; - IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8)) { - Reg32 dest, src1; - Reg8 src2; - IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8C)) { - Reg32 dest, src1; - IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); - } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32C, SIG_TYPE_I8)) { - Reg32 dest; - Reg8 src2; - IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8)) { - Reg64 dest, src1; - Reg8 src2; - IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8C)) { - Reg64 dest, src1; - IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); - } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64C, SIG_TYPE_I8)) { - Reg64 dest; - Reg8 src2; - IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); - } else { - ASSERT_INVALID_TYPE(); - } - if (i->flags & ARITHMETIC_SET_CARRY) { - // EFLAGS should have CA set? 
- // (so long as we don't fuck with it) - // UNIMPLEMENTED_SEQ(); - } -}; - -typedef void(vvv_fn)(X64Emitter& e, Instr& i, const Reg& dest_src1, const Operand& src2, const Operand& src3); -typedef void(vvc_fn)(X64Emitter& e, Instr& i, const Reg& dest_src1, const Operand& src2, uint32_t src3); -typedef void(vcv_fn)(X64Emitter& e, Instr& i, const Reg& dest_src1, uint32_t src2, const Operand& src3); -template -void IntTernaryOpVVV(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, - TD& dest, TS1& src1, TS2& src2, TS3& src3) { - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src2.value, src2, 0, - i->src3.value, src3, 0); - if (dest == src1) { - vvv_fn(e, *i, dest, src2, src3); - } else if (dest == src2) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - vvv_fn(e, *i, dest, src1, src3); - } else { - UNIMPLEMENTED_SEQ(); - } - } else { - e.mov(dest, src1); - vvv_fn(e, *i, dest, src2, src3); - } - e.EndOp(dest, src1, src2, src3); -} -template -void IntTernaryOpVVC(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, vvc_fn vvc_fn, - TD& dest, TS1& src1, TS2& src2, Value* src3) { - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src2.value, src2, 0); - if (dest.getBit() <= 32) { - // 32-bit. - if (dest == src1) { - vvc_fn(e, *i, dest, src2, (uint32_t)src3->get_constant(CT())); - } else if (dest == src2) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - vvc_fn(e, *i, dest, src1, (uint32_t)src3->get_constant(CT())); - } else { - // Eww. - e.mov(e.rax, src2); - e.mov(dest, src1); - vvc_fn(e, *i, dest, e.rax, (uint32_t)src3->get_constant(CT())); - } - } else { - e.mov(dest, src1); - vvc_fn(e, *i, dest, src2, (uint32_t)src3->get_constant(CT())); - } - } else { - // 64-bit. - if (dest == src1) { - e.mov(e.rax, src3->constant.i64); - vvv_fn(e, *i, dest, src2, e.rax); - } else if (dest == src2) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - e.mov(e.rax, src3->constant.i64); - vvv_fn(e, *i, dest, src1, e.rax); - } else { - // Eww. - e.mov(e.rax, src1); - e.mov(src1, src2); - e.mov(dest, e.rax); - e.mov(e.rax, src3->constant.i64); - vvv_fn(e, *i, dest, src1, e.rax); - } - } else { - e.mov(e.rax, src3->constant.i64); - e.mov(dest, src1); - vvv_fn(e, *i, dest, src2, e.rax); - } - } - e.EndOp(dest, src1, src2); -} -template -void IntTernaryOpVCV(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, vcv_fn vcv_fn, - TD& dest, TS1& src1, Value* src2, TS3& src3) { - e.BeginOp(i->dest, dest, REG_DEST, - i->src1.value, src1, 0, - i->src3.value, src3, 0); - if (dest.getBit() <= 32) { - // 32-bit. - if (dest == src1) { - vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), src3); - } else if (dest == src3) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), src1); - } else { - // Eww. - e.mov(e.rax, src3); - e.mov(dest, src1); - vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), e.rax); - } - } else { - e.mov(dest, src1); - vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), src3); - } - } else { - // 64-bit. - if (dest == src1) { - e.mov(e.rax, src2->constant.i64); - vvv_fn(e, *i, dest, e.rax, src3); - } else if (dest == src3) { - if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { - e.mov(e.rax, src2->constant.i64); - vvv_fn(e, *i, dest, src1, e.rax); - } else { - // Eww. 
- e.mov(e.rax, src1); - e.mov(src1, src3); - e.mov(dest, e.rax); - e.mov(e.rax, src2->constant.i64); - vvv_fn(e, *i, dest, e.rax, src1); - } - } else { - e.mov(e.rax, src2->constant.i64); - e.mov(dest, src1); - vvv_fn(e, *i, dest, e.rax, src3); - } - } - e.EndOp(dest, src1, src3); -} -void IntTernaryOp(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, vvc_fn vvc_fn, vcv_fn vcv_fn) { - // TODO(benvanik): table lookup. This linear scan is slow. - // Note: we assume DEST.type = SRC1.type = SRC2.type, but that SRC3.type may vary. - XEASSERT(i->dest->type == i->src1.value->type && - i->dest->type == i->src2.value->type); - // TODO(benvanik): table lookup. - if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8)) { - Reg8 dest, src1, src2; - Reg8 src3; - IntTernaryOpVVV(e, i, vvv_fn, dest, src1, src2, src3); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8C)) { - Reg8 dest, src1, src2; - IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8)) { - Reg16 dest, src1, src2; - Reg8 src3; - IntTernaryOpVVV(e, i, vvv_fn, dest, src1, src2, src3); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8C)) { - Reg16 dest, src1, src2; - IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8)) { - Reg32 dest, src1, src2; - Reg8 src3; - IntTernaryOpVVV(e, i,vvv_fn, dest, src1, src2, src3); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8C)) { - Reg32 dest, src1, src2; - IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8)) { - Reg64 dest, src1, src2; - Reg8 src3; - IntTernaryOpVVV(e, i, vvv_fn, dest, src1, src2, src3); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8C)) { - Reg64 dest, src1, src2; - IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); - // - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8C, SIG_TYPE_I8)) { - Reg8 dest, src1, src3; - IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16C, SIG_TYPE_I8)) { - Reg16 dest, src1, src3; - IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32C, SIG_TYPE_I8)) { - Reg32 dest, src1, src3; - IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); - } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64C, SIG_TYPE_I8)) { - Reg64 dest, src1, src3; - IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); - } else { - ASSERT_INVALID_TYPE(); - } - if (i->flags & ARITHMETIC_SET_CARRY) { - // EFLAGS should have CA set? - // (so long as we don't fuck with it) - // UNIMPLEMENTED_SEQ(); - } -} - } // namespace +// Major templating foo lives in here. 
+#include + + void alloy::backend::x64::lowering::RegisterSequences(LoweringTable* table) { // -------------------------------------------------------------------------- // General diff --git a/src/alloy/backend/x64/lowering/op_utils.inl b/src/alloy/backend/x64/lowering/op_utils.inl new file mode 100644 index 000000000..8e502bd63 --- /dev/null +++ b/src/alloy/backend/x64/lowering/op_utils.inl @@ -0,0 +1,574 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2014 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. * + ****************************************************************************** + */ + +// NOTE: this file is only designed to be included by lowering_sequencies.cc! + +#ifndef ALLOY_BACKEND_X64_X64_LOWERING_OP_UTILS_INL_ +#define ALLOY_BACKEND_X64_X64_LOWERING_OP_UTILS_INL_ + +namespace { + +// Sets EFLAGs with zf for the given value. +// ZF = 1 if false, 0 = true (so jz = jump if false) +void CheckBoolean(X64Emitter& e, Value* v) { + if (v->IsConstant()) { + e.mov(e.ah, (v->IsConstantZero() ? 1 : 0) << 6); + e.sahf(); + } else if (v->type == INT8_TYPE) { + Reg8 src; + e.BeginOp(v, src, 0); + e.test(src, src); + e.EndOp(src); + } else if (v->type == INT16_TYPE) { + Reg16 src; + e.BeginOp(v, src, 0); + e.test(src, src); + e.EndOp(src); + } else if (v->type == INT32_TYPE) { + Reg32 src; + e.BeginOp(v, src, 0); + e.test(src, src); + e.EndOp(src); + } else if (v->type == INT64_TYPE) { + Reg64 src; + e.BeginOp(v, src, 0); + e.test(src, src); + e.EndOp(src); + } else if (v->type == FLOAT32_TYPE) { + UNIMPLEMENTED_SEQ(); + } else if (v->type == FLOAT64_TYPE) { + UNIMPLEMENTED_SEQ(); + } else if (v->type == VEC128_TYPE) { + UNIMPLEMENTED_SEQ(); + } else { + ASSERT_INVALID_TYPE(); + } +} + +void CompareXX(X64Emitter& e, Instr*& i, void(set_fn)(X64Emitter& e, Reg8& dest, bool invert)) { + if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8)) { + Reg8 dest; + Reg8 src1, src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src2.value, src2, 0); + e.cmp(src1, src2); + set_fn(e, dest, false); + e.EndOp(dest, src1, src2); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8C)) { + Reg8 dest; + Reg8 src1; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0); + e.cmp(src1, i->src2.value->constant.i8); + set_fn(e, dest, false); + e.EndOp(dest, src1); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8C, SIG_TYPE_I8)) { + Reg8 dest; + Reg8 src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src2.value, src2, 0); + e.cmp(src2, i->src1.value->constant.i8); + set_fn(e, dest, true); + e.EndOp(dest, src2); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16)) { + Reg8 dest; + Reg16 src1, src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src2.value, src2, 0); + e.cmp(src1, src2); + set_fn(e, dest, false); + e.EndOp(dest, src1, src2); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16C)) { + Reg8 dest; + Reg16 src1; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0); + e.cmp(src1, i->src2.value->constant.i16); + set_fn(e, dest, false); + e.EndOp(dest, src1); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16C, SIG_TYPE_I16)) { + Reg8 dest; + Reg16 src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src2.value, src2, 0); + e.cmp(src2, i->src1.value->constant.i16); 
+ e.sete(dest); + set_fn(e, dest, true); + e.EndOp(dest, src2); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32)) { + Reg8 dest; + Reg32 src1, src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src2.value, src2, 0); + e.cmp(src1, src2); + set_fn(e, dest, false); + e.EndOp(dest, src1, src2); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32C)) { + Reg8 dest; + Reg32 src1; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0); + e.cmp(src1, i->src2.value->constant.i32); + set_fn(e, dest, false); + e.EndOp(dest, src1); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32C, SIG_TYPE_I32)) { + Reg8 dest; + Reg32 src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src2.value, src2, 0); + e.cmp(src2, i->src1.value->constant.i32); + set_fn(e, dest, true); + e.EndOp(dest, src2); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64)) { + Reg8 dest; + Reg64 src1, src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src2.value, src2, 0); + e.cmp(src1, src2); + set_fn(e, dest, false); + e.EndOp(dest, src1, src2); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64C)) { + Reg8 dest; + Reg64 src1; + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0); + e.mov(e.rax, i->src2.value->constant.i64); + e.cmp(src1, e.rax); + set_fn(e, dest, false); + e.EndOp(dest, src1); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64C, SIG_TYPE_I64)) { + Reg8 dest; + Reg64 src2; + e.BeginOp(i->dest, dest, REG_DEST, + i->src2.value, src2, 0); + e.mov(e.rax, i->src1.value->constant.i64); + e.cmp(src2, e.rax); + set_fn(e, dest, true); + e.EndOp(dest, src2); + } else { + UNIMPLEMENTED_SEQ(); + } +}; + +typedef void(v_fn)(X64Emitter& e, Instr& i, const Reg& dest_src); +template +void IntUnaryOpV(X64Emitter& e, Instr*& i, v_fn v_fn, + T& dest, T& src1) { + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0); + if (dest == src1) { + v_fn(e, *i, dest); + } else { + e.mov(dest, src1); + v_fn(e, *i, dest); + } + e.EndOp(dest, src1); +} +template +void IntUnaryOpC(X64Emitter& e, Instr*& i, v_fn v_fn, + T& dest, Value* src1) { + e.BeginOp(i->dest, dest, REG_DEST); + e.mov(dest, (uint64_t)src1->get_constant(CT())); + v_fn(e, *i, dest); + e.EndOp(dest); +} +void IntUnaryOp(X64Emitter& e, Instr*& i, v_fn v_fn) { + if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8)) { + Reg8 dest, src1; + IntUnaryOpV(e, i, v_fn, dest, src1); + } else if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8C)) { + Reg8 dest; + IntUnaryOpC(e, i, v_fn, dest, i->src1.value); + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16)) { + Reg16 dest, src1; + IntUnaryOpV(e, i, v_fn, dest, src1); + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16C)) { + Reg16 dest; + IntUnaryOpC(e, i, v_fn, dest, i->src1.value); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32)) { + Reg32 dest, src1; + IntUnaryOpV(e, i, v_fn, dest, src1); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32C)) { + Reg32 dest; + IntUnaryOpC(e, i, v_fn, dest, i->src1.value); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64)) { + Reg64 dest, src1; + IntUnaryOpV(e, i, v_fn, dest, src1); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64C)) { + Reg64 dest; + IntUnaryOpC(e, i, v_fn, dest, i->src1.value); + } else { + ASSERT_INVALID_TYPE(); + } + if (i->flags & ARITHMETIC_SET_CARRY) { + // EFLAGS should have CA set? 
+ // (so long as we don't fuck with it) + // UNIMPLEMENTED_SEQ(); + } +}; + +typedef void(vv_fn)(X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src); +typedef void(vc_fn)(X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src); +template +void IntBinaryOpVV(X64Emitter& e, Instr*& i, vv_fn vv_fn, + TD& dest, TS1& src1, TS2& src2) { + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src2.value, src2, 0); + if (dest == src1) { + vv_fn(e, *i, dest, src2); + } else if (dest == src2) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + vv_fn(e, *i, dest, src1); + } else { + // Eww. + e.mov(e.rax, src1); + vv_fn(e, *i, e.rax, src2); + e.mov(dest, e.rax); + } + } else { + e.mov(dest, src1); + vv_fn(e, *i, dest, src2); + } + e.EndOp(dest, src1, src2); +} +template +void IntBinaryOpVC(X64Emitter& e, Instr*& i, vv_fn vv_fn, vc_fn vc_fn, + TD& dest, TS1& src1, Value* src2) { + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0); + if (dest.getBit() <= 32) { + // 32-bit. + if (dest == src1) { + vc_fn(e, *i, dest, (uint32_t)src2->get_constant(CT())); + } else { + e.mov(dest, src1); + vc_fn(e, *i, dest, (uint32_t)src2->get_constant(CT())); + } + } else { + // 64-bit. + if (dest == src1) { + e.mov(e.rax, src2->constant.i64); + vv_fn(e, *i, dest, e.rax); + } else { + e.mov(e.rax, src2->constant.i64); + e.mov(dest, src1); + vv_fn(e, *i, dest, e.rax); + } + } + e.EndOp(dest, src1); +} +template +void IntBinaryOpCV(X64Emitter& e, Instr*& i, vv_fn vv_fn, vc_fn vc_fn, + TD& dest, Value* src1, TS2& src2) { + e.BeginOp(i->dest, dest, REG_DEST, + i->src2.value, src2, 0); + if (dest.getBit() <= 32) { + // 32-bit. + if (dest == src2) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + vc_fn(e, *i, dest, (uint32_t)src1->get_constant(CT())); + } else { + // Eww. + e.mov(e.rax, src2); + e.mov(dest, (uint32_t)src1->get_constant(CT())); + vv_fn(e, *i, dest, e.rax); + } + } else { + e.mov(dest, src2); + vc_fn(e, *i, dest, (uint32_t)src1->get_constant(CT())); + } + } else { + // 64-bit. + if (dest == src2) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + e.mov(e.rax, src1->constant.i64); + vv_fn(e, *i, dest, e.rax); + } else { + // Eww. + e.mov(e.rax, src1->constant.i64); + vv_fn(e, *i, e.rax, src2); + e.mov(dest, e.rax); + } + } else { + e.mov(e.rax, src2); + e.mov(dest, src1->constant.i64); + vv_fn(e, *i, dest, e.rax); + } + } + e.EndOp(dest, src2); +} +void IntBinaryOp(X64Emitter& e, Instr*& i, vv_fn vv_fn, vc_fn vc_fn) { + // TODO(benvanik): table lookup. This linear scan is slow. + // Note: we assume DEST.type = SRC1.type, but that SRC2.type may vary. 
+ XEASSERT(i->dest->type == i->src1.value->type); + if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8)) { + Reg8 dest, src1, src2; + IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); + } else if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8C)) { + Reg8 dest, src1; + IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); + } else if (i->Match(SIG_TYPE_I8, SIG_TYPE_I8C, SIG_TYPE_I8)) { + Reg8 dest, src2; + IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I16)) { + Reg16 dest, src1, src2; + IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I16C)) { + Reg16 dest, src1; + IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16C, SIG_TYPE_I16)) { + Reg16 dest, src2; + IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I32)) { + Reg32 dest, src1, src2; + IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I32C)) { + Reg32 dest, src1; + IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32C, SIG_TYPE_I32)) { + Reg32 dest, src2; + IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I64)) { + Reg64 dest, src1, src2; + IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I64C)) { + Reg64 dest, src1; + IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64C, SIG_TYPE_I64)) { + Reg64 dest, src2; + IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); + // Start forced src2=i8 + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8)) { + Reg16 dest, src1; + Reg8 src2; + IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8C)) { + Reg16 dest, src1; + IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); + } else if (i->Match(SIG_TYPE_I16, SIG_TYPE_I16C, SIG_TYPE_I8)) { + Reg16 dest; + Reg8 src2; + IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8)) { + Reg32 dest, src1; + Reg8 src2; + IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8C)) { + Reg32 dest, src1; + IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); + } else if (i->Match(SIG_TYPE_I32, SIG_TYPE_I32C, SIG_TYPE_I8)) { + Reg32 dest; + Reg8 src2; + IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8)) { + Reg64 dest, src1; + Reg8 src2; + IntBinaryOpVV(e, i, vv_fn, dest, src1, src2); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8C)) { + Reg64 dest, src1; + IntBinaryOpVC(e, i, vv_fn, vc_fn, dest, src1, i->src2.value); + } else if (i->Match(SIG_TYPE_I64, SIG_TYPE_I64C, SIG_TYPE_I8)) { + Reg64 dest; + Reg8 src2; + IntBinaryOpCV(e, i, vv_fn, vc_fn, dest, i->src1.value, src2); + } else { + ASSERT_INVALID_TYPE(); + } + if (i->flags & ARITHMETIC_SET_CARRY) { + // EFLAGS should have CA set? 
+ // (so long as we don't fuck with it) + // UNIMPLEMENTED_SEQ(); + } +}; + +typedef void(vvv_fn)(X64Emitter& e, Instr& i, const Reg& dest_src1, const Operand& src2, const Operand& src3); +typedef void(vvc_fn)(X64Emitter& e, Instr& i, const Reg& dest_src1, const Operand& src2, uint32_t src3); +typedef void(vcv_fn)(X64Emitter& e, Instr& i, const Reg& dest_src1, uint32_t src2, const Operand& src3); +template +void IntTernaryOpVVV(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, + TD& dest, TS1& src1, TS2& src2, TS3& src3) { + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src2.value, src2, 0, + i->src3.value, src3, 0); + if (dest == src1) { + vvv_fn(e, *i, dest, src2, src3); + } else if (dest == src2) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + vvv_fn(e, *i, dest, src1, src3); + } else { + UNIMPLEMENTED_SEQ(); + } + } else { + e.mov(dest, src1); + vvv_fn(e, *i, dest, src2, src3); + } + e.EndOp(dest, src1, src2, src3); +} +template +void IntTernaryOpVVC(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, vvc_fn vvc_fn, + TD& dest, TS1& src1, TS2& src2, Value* src3) { + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src2.value, src2, 0); + if (dest.getBit() <= 32) { + // 32-bit. + if (dest == src1) { + vvc_fn(e, *i, dest, src2, (uint32_t)src3->get_constant(CT())); + } else if (dest == src2) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + vvc_fn(e, *i, dest, src1, (uint32_t)src3->get_constant(CT())); + } else { + // Eww. + e.mov(e.rax, src2); + e.mov(dest, src1); + vvc_fn(e, *i, dest, e.rax, (uint32_t)src3->get_constant(CT())); + } + } else { + e.mov(dest, src1); + vvc_fn(e, *i, dest, src2, (uint32_t)src3->get_constant(CT())); + } + } else { + // 64-bit. + if (dest == src1) { + e.mov(e.rax, src3->constant.i64); + vvv_fn(e, *i, dest, src2, e.rax); + } else if (dest == src2) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + e.mov(e.rax, src3->constant.i64); + vvv_fn(e, *i, dest, src1, e.rax); + } else { + // Eww. + e.mov(e.rax, src1); + e.mov(src1, src2); + e.mov(dest, e.rax); + e.mov(e.rax, src3->constant.i64); + vvv_fn(e, *i, dest, src1, e.rax); + } + } else { + e.mov(e.rax, src3->constant.i64); + e.mov(dest, src1); + vvv_fn(e, *i, dest, src2, e.rax); + } + } + e.EndOp(dest, src1, src2); +} +template +void IntTernaryOpVCV(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, vcv_fn vcv_fn, + TD& dest, TS1& src1, Value* src2, TS3& src3) { + e.BeginOp(i->dest, dest, REG_DEST, + i->src1.value, src1, 0, + i->src3.value, src3, 0); + if (dest.getBit() <= 32) { + // 32-bit. + if (dest == src1) { + vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), src3); + } else if (dest == src3) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), src1); + } else { + // Eww. + e.mov(e.rax, src3); + e.mov(dest, src1); + vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), e.rax); + } + } else { + e.mov(dest, src1); + vcv_fn(e, *i, dest, (uint32_t)src2->get_constant(CT()), src3); + } + } else { + // 64-bit. + if (dest == src1) { + e.mov(e.rax, src2->constant.i64); + vvv_fn(e, *i, dest, e.rax, src3); + } else if (dest == src3) { + if (i->opcode->flags & OPCODE_FLAG_COMMUNATIVE) { + e.mov(e.rax, src2->constant.i64); + vvv_fn(e, *i, dest, src1, e.rax); + } else { + // Eww. 
+ e.mov(e.rax, src1); + e.mov(src1, src3); + e.mov(dest, e.rax); + e.mov(e.rax, src2->constant.i64); + vvv_fn(e, *i, dest, e.rax, src1); + } + } else { + e.mov(e.rax, src2->constant.i64); + e.mov(dest, src1); + vvv_fn(e, *i, dest, e.rax, src3); + } + } + e.EndOp(dest, src1, src3); +} +void IntTernaryOp(X64Emitter& e, Instr*& i, vvv_fn vvv_fn, vvc_fn vvc_fn, vcv_fn vcv_fn) { + // TODO(benvanik): table lookup. This linear scan is slow. + // Note: we assume DEST.type = SRC1.type = SRC2.type, but that SRC3.type may vary. + XEASSERT(i->dest->type == i->src1.value->type && + i->dest->type == i->src2.value->type); + // TODO(benvanik): table lookup. + if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8)) { + Reg8 dest, src1, src2; + Reg8 src3; + IntTernaryOpVVV(e, i, vvv_fn, dest, src1, src2, src3); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8, SIG_TYPE_I8C)) { + Reg8 dest, src1, src2; + IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8)) { + Reg16 dest, src1, src2; + Reg8 src3; + IntTernaryOpVVV(e, i, vvv_fn, dest, src1, src2, src3); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16, SIG_TYPE_I8C)) { + Reg16 dest, src1, src2; + IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8)) { + Reg32 dest, src1, src2; + Reg8 src3; + IntTernaryOpVVV(e, i,vvv_fn, dest, src1, src2, src3); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32, SIG_TYPE_I8C)) { + Reg32 dest, src1, src2; + IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8)) { + Reg64 dest, src1, src2; + Reg8 src3; + IntTernaryOpVVV(e, i, vvv_fn, dest, src1, src2, src3); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64, SIG_TYPE_I8C)) { + Reg64 dest, src1, src2; + IntTernaryOpVVC(e, i, vvv_fn, vvc_fn, dest, src1, src2, i->src3.value); + // + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I8, SIG_TYPE_I8C, SIG_TYPE_I8)) { + Reg8 dest, src1, src3; + IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I16, SIG_TYPE_I16C, SIG_TYPE_I8)) { + Reg16 dest, src1, src3; + IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I32, SIG_TYPE_I32C, SIG_TYPE_I8)) { + Reg32 dest, src1, src3; + IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); + } else if (i->Match(SIG_TYPE_IGNORE, SIG_TYPE_I64, SIG_TYPE_I64C, SIG_TYPE_I8)) { + Reg64 dest, src1, src3; + IntTernaryOpVCV(e, i, vvv_fn, vcv_fn, dest, src1, i->src2.value, src3); + } else { + ASSERT_INVALID_TYPE(); + } + if (i->flags & ARITHMETIC_SET_CARRY) { + // EFLAGS should have CA set? + // (so long as we don't fuck with it) + // UNIMPLEMENTED_SEQ(); + } +} + +} // namespace + +#endif // ALLOY_BACKEND_X64_X64_LOWERING_OP_UTILS_INL_ diff --git a/src/alloy/backend/x64/lowering/sources.gypi b/src/alloy/backend/x64/lowering/sources.gypi index 5c710cfcc..93a754180 100644 --- a/src/alloy/backend/x64/lowering/sources.gypi +++ b/src/alloy/backend/x64/lowering/sources.gypi @@ -5,5 +5,6 @@ 'lowering_sequences.h', 'lowering_table.cc', 'lowering_table.h', + 'op_utils.inl', ], }
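
For reference (not part of the change itself): the dispatch helpers moved into op_utils.inl (IntUnaryOp / IntBinaryOp / IntTernaryOp) are meant to be driven from the per-opcode sequences in lowering_sequences.cc, with the actual instruction emission supplied as capture-free lambdas matching the v_fn / vv_fn / vc_fn typedefs above. The following is only an illustrative sketch of such a caller; the AddSequence registration shape, OPCODE_ADD, and IsIntType are assumptions about surrounding code that this patch does not show, and instruction advancement is elided.

    // Hypothetical caller: register a lowering sequence for an integer add.
    // IntBinaryOp resolves register widths and the dest==src1 / dest==src2 /
    // constant-operand permutations, then calls back into these lambdas to
    // emit the actual x64 instruction.
    table->AddSequence(OPCODE_ADD, [](X64Emitter& e, Instr*& i) {
      if (IsIntType(i->dest->type)) {
        IntBinaryOp(
            e, i,
            [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
              e.add(dest_src, src);  // register/register form
            },
            [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
              e.add(dest_src, src);  // register/immediate form
            });
      } else {
        UNIMPLEMENTED_SEQ();
      }
      // (advancing i to the next instruction is elided here)
      return true;
    });

Because the lambdas capture nothing, they convert to the plain function-pointer parameters the helpers expect, so each opcode only has to supply the one-line emitters while op_utils.inl owns the operand-shape dispatch.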