Reformatting lambdas to make VS happier.

This commit is contained in:
Ben Vanik 2014-01-26 21:34:46 -08:00
parent 6e35b6efa3
commit ae6c903173
1 changed file with 183 additions and 229 deletions

View File

@ -1311,15 +1311,13 @@ table->AddSequence(OPCODE_LOAD, [](X64Emitter& e, Instr*& i) {
if (cbs->handles(cbs->context, address)) { if (cbs->handles(cbs->context, address)) {
// Eh, hacking lambdas. // Eh, hacking lambdas.
i->src3.offset = (uint64_t)cbs; i->src3.offset = (uint64_t)cbs;
IntUnaryOp( IntUnaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src) {
e, i, auto cbs = (RegisterAccessCallbacks*)i.src3.offset;
[](X64Emitter& e, Instr& i, const Reg& dest_src) { e.mov(e.rcx, (uint64_t)cbs->context);
auto cbs = (RegisterAccessCallbacks*)i.src3.offset; e.mov(e.rdx, i.src1.value->AsUint64());
e.mov(e.rcx, (uint64_t)cbs->context); CallNative(e, cbs->read);
e.mov(e.rdx, i.src1.value->AsUint64()); e.mov(dest_src, e.rax);
CallNative(e, cbs->read); });
e.mov(dest_src, e.rax);
});
i = e.Advance(i); i = e.Advance(i);
return true; return true;
} }
@ -1774,14 +1772,11 @@ table->AddSequence(OPCODE_VECTOR_COMPARE_UGE, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_ADD, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_ADD, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, e.add(dest_src, src);
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.add(dest_src, src); e.add(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.add(dest_src, src);
});
} else if (IsFloatType(i->dest->type)) { } else if (IsFloatType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
@ -1796,36 +1791,32 @@ table->AddSequence(OPCODE_ADD, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_ADD_CARRY, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_ADD_CARRY, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
// dest = src1 + src2 + src3.i8 // dest = src1 + src2 + src3.i8
IntTernaryOp( IntTernaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src2, const Operand& src3) {
e, i, Reg8 src3_8(src3.getIdx());
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src2, const Operand& src3) { if (src3.getIdx() <= 4) {
Reg8 src3_8(src3.getIdx()); e.mov(e.ah, src3_8);
if (src3.getIdx() <= 4) { } else {
e.mov(e.ah, src3_8); e.mov(e.al, src3_8);
} else { e.mov(e.ah, e.al);
e.mov(e.al, src3_8); }
e.mov(e.ah, e.al); e.sahf();
} e.adc(dest_src, src2);
e.sahf(); }, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src2, uint32_t src3) {
e.adc(dest_src, src2); e.mov(e.eax, src3);
}, e.mov(e.ah, e.al);
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src2, uint32_t src3) { e.sahf();
e.mov(e.eax, src3); e.adc(dest_src, src2);
e.mov(e.ah, e.al); }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src2, const Operand& src3) {
e.sahf(); Reg8 src3_8(src3.getIdx());
e.adc(dest_src, src2); if (src3.getIdx() <= 4) {
}, e.mov(e.ah, src3_8);
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src2, const Operand& src3) { } else {
Reg8 src3_8(src3.getIdx()); e.mov(e.al, src3_8);
if (src3.getIdx() <= 4) { e.mov(e.ah, e.al);
e.mov(e.ah, src3_8); }
} else { e.sahf();
e.mov(e.al, src3_8); e.adc(dest_src, src2);
e.mov(e.ah, e.al); });
}
e.sahf();
e.adc(dest_src, src2);
});
} else { } else {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} }
@ -1855,14 +1846,11 @@ table->AddSequence(OPCODE_VECTOR_ADD, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_SUB, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_SUB, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, e.sub(dest_src, src);
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.sub(dest_src, src); e.sub(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.sub(dest_src, src);
});
} else if (IsFloatType(i->dest->type)) { } else if (IsFloatType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
@ -1878,34 +1866,31 @@ table->AddSequence(OPCODE_SUB, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_MUL, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_MUL, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, // RAX = value, RDX = clobbered
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { // TODO(benvanik): make the register allocator put dest_src in RAX?
// RAX = value, RDX = clobbered auto Nax = LIKE_REG(e.rax, dest_src);
// TODO(benvanik): make the register allocator put dest_src in RAX? e.mov(Nax, dest_src);
auto Nax = LIKE_REG(e.rax, dest_src); if (i.flags & ARITHMETIC_UNSIGNED) {
e.mov(Nax, dest_src); e.mul(src);
if (i.flags & ARITHMETIC_UNSIGNED) { } else {
e.mul(src); e.imul(src);
} else { }
e.imul(src); e.mov(dest_src, Nax);
} }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.mov(dest_src, Nax); // RAX = value, RDX = clobbered
}, // TODO(benvanik): make the register allocator put dest_src in RAX?
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) { auto Nax = LIKE_REG(e.rax, dest_src);
// RAX = value, RDX = clobbered auto Ndx = LIKE_REG(e.rdx, dest_src);
// TODO(benvanik): make the register allocator put dest_src in RAX? e.mov(Nax, dest_src);
auto Nax = LIKE_REG(e.rax, dest_src); e.mov(Ndx, src);
auto Ndx = LIKE_REG(e.rdx, dest_src); if (i.flags & ARITHMETIC_UNSIGNED) {
e.mov(Nax, dest_src); e.mul(Ndx);
e.mov(Ndx, src); } else {
if (i.flags & ARITHMETIC_UNSIGNED) { e.imul(Ndx);
e.mul(Ndx); }
} else { e.mov(dest_src, Nax);
e.imul(Ndx); });
}
e.mov(dest_src, Nax);
});
} else if (IsFloatType(i->dest->type)) { } else if (IsFloatType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
@ -1919,35 +1904,32 @@ table->AddSequence(OPCODE_MUL, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_MUL_HI, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_MUL_HI, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, // RAX = value, RDX = clobbered
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { // TODO(benvanik): make the register allocator put dest_src in RAX?
// RAX = value, RDX = clobbered auto Nax = LIKE_REG(e.rax, dest_src);
// TODO(benvanik): make the register allocator put dest_src in RAX? auto Ndx = LIKE_REG(e.rdx, dest_src);
auto Nax = LIKE_REG(e.rax, dest_src); e.mov(Nax, dest_src);
auto Ndx = LIKE_REG(e.rdx, dest_src); if (i.flags & ARITHMETIC_UNSIGNED) {
e.mov(Nax, dest_src); e.mul(src);
if (i.flags & ARITHMETIC_UNSIGNED) { } else {
e.mul(src); e.imul(src);
} else { }
e.imul(src); e.mov(dest_src, Ndx);
} }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.mov(dest_src, Ndx); // RAX = value, RDX = clobbered
}, // TODO(benvanik): make the register allocator put dest_src in RAX?
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) { auto Nax = LIKE_REG(e.rax, dest_src);
// RAX = value, RDX = clobbered auto Ndx = LIKE_REG(e.rdx, dest_src);
// TODO(benvanik): make the register allocator put dest_src in RAX? e.mov(Nax, dest_src);
auto Nax = LIKE_REG(e.rax, dest_src); e.mov(Ndx, src);
auto Ndx = LIKE_REG(e.rdx, dest_src); if (i.flags & ARITHMETIC_UNSIGNED) {
e.mov(Nax, dest_src); e.mul(Ndx);
e.mov(Ndx, src); } else {
if (i.flags & ARITHMETIC_UNSIGNED) { e.imul(Ndx);
e.mul(Ndx); }
} else { e.mov(dest_src, Ndx);
e.imul(Ndx); });
}
e.mov(dest_src, Ndx);
});
} else { } else {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} }
@ -1957,34 +1939,31 @@ table->AddSequence(OPCODE_MUL_HI, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_DIV, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_DIV, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, // RAX = value, RDX = clobbered
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { // TODO(benvanik): make the register allocator put dest_src in RAX?
// RAX = value, RDX = clobbered auto Nax = LIKE_REG(e.rax, dest_src);
// TODO(benvanik): make the register allocator put dest_src in RAX? e.mov(Nax, dest_src);
auto Nax = LIKE_REG(e.rax, dest_src); if (i.flags & ARITHMETIC_UNSIGNED) {
e.mov(Nax, dest_src); e.div(src);
if (i.flags & ARITHMETIC_UNSIGNED) { } else {
e.div(src); e.idiv(src);
} else { }
e.idiv(src); e.mov(dest_src, Nax);
} }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.mov(dest_src, Nax); // RAX = value, RDX = clobbered
}, // TODO(benvanik): make the register allocator put dest_src in RAX?
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) { auto Nax = LIKE_REG(e.rax, dest_src);
// RAX = value, RDX = clobbered auto Ndx = LIKE_REG(e.rdx, dest_src);
// TODO(benvanik): make the register allocator put dest_src in RAX? e.mov(Nax, dest_src);
auto Nax = LIKE_REG(e.rax, dest_src); e.mov(Ndx, src);
auto Ndx = LIKE_REG(e.rdx, dest_src); if (i.flags & ARITHMETIC_UNSIGNED) {
e.mov(Nax, dest_src); e.div(Ndx);
e.mov(Ndx, src); } else {
if (i.flags & ARITHMETIC_UNSIGNED) { e.idiv(Ndx);
e.div(Ndx); }
} else { e.mov(dest_src, Nax);
e.idiv(Ndx); });
}
e.mov(dest_src, Nax);
});
} else if (IsFloatType(i->dest->type)) { } else if (IsFloatType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
@ -2122,14 +2101,11 @@ table->AddSequence(OPCODE_DOT_PRODUCT_4, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_AND, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_AND, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, e.and(dest_src, src);
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.and(dest_src, src); e.and(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.and(dest_src, src);
});
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else { } else {
@ -2141,14 +2117,11 @@ table->AddSequence(OPCODE_AND, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_OR, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_OR, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, e.or(dest_src, src);
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.or(dest_src, src); e.or(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.or(dest_src, src);
});
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else { } else {
@ -2160,14 +2133,11 @@ table->AddSequence(OPCODE_OR, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_XOR, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_XOR, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, e.xor(dest_src, src);
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.xor(dest_src, src); e.xor(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.xor(dest_src, src);
});
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else { } else {
@ -2179,11 +2149,9 @@ table->AddSequence(OPCODE_XOR, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_NOT, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_NOT, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntUnaryOp( IntUnaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src) {
e, i, e.not(dest_src);
[](X64Emitter& e, Instr& i, const Reg& dest_src) { });
e.not(dest_src);
});
} else if (IsVecType(i->dest->type)) { } else if (IsVecType(i->dest->type)) {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} else { } else {
@ -2196,24 +2164,21 @@ table->AddSequence(OPCODE_NOT, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_SHL, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_SHL, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
// TODO(benvanik): use shlx if available. // TODO(benvanik): use shlx if available.
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, // Can only shl by cl. Eww x86.
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { Reg8 shamt(src.getIdx());
// Can only shl by cl. Eww x86. e.mov(e.rax, e.rcx);
Reg8 shamt(src.getIdx()); e.mov(e.cl, shamt);
e.mov(e.rax, e.rcx); e.shl(dest_src, e.cl);
e.mov(e.cl, shamt); e.mov(e.rcx, e.rax);
e.shl(dest_src, e.cl); // BeaEngine can't disasm this, boo.
e.mov(e.rcx, e.rax); /*Reg32e dest_src_e(dest_src.getIdx(), MAX(dest_src.getBit(), 32));
// BeaEngine can't disasm this, boo. Reg32e src_e(src.getIdx(), MAX(dest_src.getBit(), 32));
/*Reg32e dest_src_e(dest_src.getIdx(), MAX(dest_src.getBit(), 32)); e.and(src_e, 0x3F);
Reg32e src_e(src.getIdx(), MAX(dest_src.getBit(), 32)); e.shlx(dest_src_e, dest_src_e, src_e);*/
e.and(src_e, 0x3F); }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.shlx(dest_src_e, dest_src_e, src_e);*/ e.shl(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.shl(dest_src, src);
});
} else { } else {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} }
@ -2224,19 +2189,16 @@ table->AddSequence(OPCODE_SHL, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_SHR, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_SHR, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
// TODO(benvanik): use shrx if available. // TODO(benvanik): use shrx if available.
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, // Can only sar by cl. Eww x86.
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { Reg8 shamt(src.getIdx());
// Can only sar by cl. Eww x86. e.mov(e.rax, e.rcx);
Reg8 shamt(src.getIdx()); e.mov(e.cl, shamt);
e.mov(e.rax, e.rcx); e.shr(dest_src, e.cl);
e.mov(e.cl, shamt); e.mov(e.rcx, e.rax);
e.shr(dest_src, e.cl); }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.mov(e.rcx, e.rax); e.shr(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.shr(dest_src, src);
});
} else { } else {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} }
@ -2247,19 +2209,16 @@ table->AddSequence(OPCODE_SHR, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_SHA, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_SHA, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
// TODO(benvanik): use sarx if available. // TODO(benvanik): use sarx if available.
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, // Can only sar by cl. Eww x86.
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { Reg8 shamt(src.getIdx());
// Can only sar by cl. Eww x86. e.mov(e.rax, e.rcx);
Reg8 shamt(src.getIdx()); e.mov(e.cl, shamt);
e.mov(e.rax, e.rcx); e.sar(dest_src, e.cl);
e.mov(e.cl, shamt); e.mov(e.rcx, e.rax);
e.sar(dest_src, e.cl); }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.mov(e.rcx, e.rax); e.sar(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.sar(dest_src, src);
});
} else { } else {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} }
@ -2323,19 +2282,16 @@ table->AddSequence(OPCODE_VECTOR_SHA, [](X64Emitter& e, Instr*& i) {
table->AddSequence(OPCODE_ROTATE_LEFT, [](X64Emitter& e, Instr*& i) { table->AddSequence(OPCODE_ROTATE_LEFT, [](X64Emitter& e, Instr*& i) {
if (IsIntType(i->dest->type)) { if (IsIntType(i->dest->type)) {
IntBinaryOp( IntBinaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) {
e, i, // Can only rol by cl. Eww x86.
[](X64Emitter& e, Instr& i, const Reg& dest_src, const Operand& src) { Reg8 shamt(src.getIdx());
// Can only rol by cl. Eww x86. e.mov(e.rax, e.rcx);
Reg8 shamt(src.getIdx()); e.mov(e.cl, shamt);
e.mov(e.rax, e.rcx); e.rol(dest_src, e.cl);
e.mov(e.cl, shamt); e.mov(e.rcx, e.rax);
e.rol(dest_src, e.cl); }, [](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.mov(e.rcx, e.rax); e.rol(dest_src, src);
}, });
[](X64Emitter& e, Instr& i, const Reg& dest_src, uint32_t src) {
e.rol(dest_src, src);
});
} else { } else {
UNIMPLEMENTED_SEQ(); UNIMPLEMENTED_SEQ();
} }
@ -2584,9 +2540,7 @@ table->AddSequence(OPCODE_UNPACK, [](X64Emitter& e, Instr*& i) {
// Load source, move from tight pack of X16Y16.... to X16...Y16... // Load source, move from tight pack of X16Y16.... to X16...Y16...
// Also zero out the high end. // Also zero out the high end.
// TODO(benvanik): special case constant unpacks that just get 0/1/etc. // TODO(benvanik): special case constant unpacks that just get 0/1/etc.
IntUnaryOp( IntUnaryOp(e, i, [](X64Emitter& e, Instr& i, const Reg& dest_src) {
e, i,
[](X64Emitter& e, Instr& i, const Reg& dest_src) {
// sx = src.iw >> 16; // sx = src.iw >> 16;
// sy = src.iw & 0xFFFF; // sy = src.iw & 0xFFFF;
// dest = { 3.0 + (sx / float(1 << 22)), // dest = { 3.0 + (sx / float(1 << 22)),