Fixing 64-bit mov encoding. *shakes fist at xbyak for silently coercing*

Ben Vanik 2014-01-27 21:32:58 -08:00
parent 8894a0f86e
commit da36baba8d
2 changed files with 27 additions and 8 deletions
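For context: x86-64 has no mov m64, imm64 encoding. A mov to memory can only carry a 32-bit immediate that the CPU sign-extends to 64 bits, so a full 64-bit constant handed to a mem/imm mov gets silently narrowed (the coercion the commit message complains about). A minimal standalone sketch of that narrowing, in plain C++ without xbyak (names are illustrative, not from the codebase):

#include <cstdint>
#include <cstdio>

// What ends up in memory when a 64-bit constant is squeezed through the
// sign-extended imm32 form of mov: only the low 32 bits survive encoding,
// and the CPU sign-extends them back to 64 bits at runtime.
uint64_t StoreViaSignExtendedImm32(uint64_t v) {
  uint32_t imm32 = static_cast<uint32_t>(v);  // encoder keeps the low half
  return static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(imm32)));
}

int main() {
  uint64_t v = 0x123456789ABCDEF0ull;
  // Prints 123456789ABCDEF0 then FFFFFFFF9ABCDEF0 -- the upper half is lost.
  printf("%016llX\n%016llX\n", (unsigned long long)v,
         (unsigned long long)StoreViaSignExtendedImm32(v));
  return 0;
}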


@@ -707,7 +707,7 @@ table->AddSequence(OPCODE_STORE_CONTEXT, [](X64Emitter& e, Instr*& i) {
CallNative(e, TraceContextStoreI64);
#endif // DTRACE
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I64C)) {
-e.mov(e.qword[e.rcx + i->src1.offset], i->src2.value->constant.i64);
+MovMem64(e, e.rcx + i->src1.offset, i->src2.value->constant.i64);
#if DTRACE
e.mov(e.rdx, i->src1.offset);
e.mov(e.r8, i->src2.value->constant.i64);
@@ -741,7 +741,7 @@ table->AddSequence(OPCODE_STORE_CONTEXT, [](X64Emitter& e, Instr*& i) {
CallNative(e, TraceContextStoreF64);
#endif // DTRACE
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_F64C)) {
-e.mov(e.qword[e.rcx + i->src1.offset], i->src2.value->constant.i64);
+MovMem64(e, e.rcx + i->src1.offset, i->src2.value->constant.i64);
#if DTRACE
e.mov(e.rdx, i->src1.offset);
e.movsd(e.xmm0, e.qword[e.rcx + i->src1.offset]);
@@ -759,8 +759,10 @@ table->AddSequence(OPCODE_STORE_CONTEXT, [](X64Emitter& e, Instr*& i) {
CallNative(e, TraceContextStoreF64);
#endif // DTRACE
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_V128C)) {
-e.mov(e.qword[e.rcx + i->src1.offset], i->src2.value->constant.v128.low);
-e.mov(e.qword[e.rcx + i->src1.offset + 8], i->src2.value->constant.v128.high);
+// TODO(benvanik): check zero
+// TODO(benvanik): correct order?
+MovMem64(e, e.rcx + i->src1.offset, i->src2.value->constant.v128.low);
+MovMem64(e, e.rcx + i->src1.offset + 8, i->src2.value->constant.v128.high);
#if DTRACE
e.mov(e.rdx, i->src1.offset);
e.movups(e.xmm0, e.ptr[e.rcx + i->src1.offset]);
@@ -1012,7 +1014,7 @@ table->AddSequence(OPCODE_STORE, [](X64Emitter& e, Instr*& i) {
CallNative(e, TraceMemoryStoreI64);
#endif // DTRACE
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I64C)) {
-e.mov(e.qword[addr], i->src2.value->constant.i64);
+MovMem64(e, addr, i->src2.value->constant.i64);
#if DTRACE
e.lea(e.rdx, e.ptr[addr]);
e.mov(e.r8, i->src2.value->constant.i64);
@@ -1046,7 +1048,7 @@ table->AddSequence(OPCODE_STORE, [](X64Emitter& e, Instr*& i) {
CallNative(e, TraceMemoryStoreF64);
#endif // DTRACE
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_F64C)) {
-e.mov(e.qword[addr], i->src2.value->constant.i64);
+MovMem64(e, addr, i->src2.value->constant.i64);
#if DTRACE
e.lea(e.rdx, e.ptr[addr]);
e.movsd(e.xmm0, e.ptr[addr]);
@@ -1065,8 +1067,10 @@ table->AddSequence(OPCODE_STORE, [](X64Emitter& e, Instr*& i) {
CallNative(e, TraceMemoryStoreV128);
#endif // DTRACE
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_V128C)) {
-e.mov(e.ptr[addr], i->src2.value->constant.v128.low);
-e.mov(e.ptr[addr + 8], i->src2.value->constant.v128.high);
+// TODO(benvanik): check zero
+// TODO(benvanik): correct order?
+MovMem64(e, addr, i->src2.value->constant.v128.low);
+MovMem64(e, addr + 8, i->src2.value->constant.v128.high);
#if DTRACE
e.lea(e.rdx, e.ptr[addr]);
e.movups(e.xmm0, e.ptr[addr]);
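Both constant-store paths above now route through MovMem64, which the second changed file below adds. For the V128 cases there is still no single-instruction option, so the 128-bit constant is written as two 64-bit halves, low at the base offset and high at offset + 8; the remaining TODO flags that the low/high ordering is unverified. A minimal sketch of what the emitted pair does at runtime, assuming a little-endian layout with the low half at the lower address (an assumption, matching the TODO's question):

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for the backend's 128-bit constant type.
struct Vec128Const {
  uint64_t low;
  uint64_t high;
};

// Runtime effect of the two MovMem64 calls: 16 bytes written at base + offset.
void StoreV128Constant(uint8_t* base, uint32_t offset, const Vec128Const& v) {
  std::memcpy(base + offset,     &v.low,  sizeof(v.low));   // MovMem64(e, addr, low)
  std::memcpy(base + offset + 8, &v.high, sizeof(v.high));  // MovMem64(e, addr + 8, high)
}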


@@ -14,6 +14,21 @@
namespace {
+// Moves a 64bit immediate into memory.
+void MovMem64(X64Emitter& e, RegExp& addr, uint64_t v) {
+if ((v & ~0x7FFFFFFF) == 0) {
+// Fits under 31 bits, so just load using normal mov.
+e.mov(e.qword[addr], v);
+} else if ((v & ~0x7FFFFFFF) == ~0x7FFFFFFF) {
+// Negative number that fits in 32bits.
+e.mov(e.qword[addr], v);
+} else {
+// 64bit number that needs double movs.
+e.mov(e.rax, v);
+e.mov(e.qword[addr], e.rax);
+}
+}
// Sets EFLAGs with zf for the given value.
// ZF = 1 if false, ZF = 0 if true (so jz = jump if false)
void CheckBoolean(X64Emitter& e, Value* v) {
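The new helper covers the three encodable cases: a constant whose upper 33 bits are all zero fits the sign-extended imm32 form directly, one whose upper 33 bits are all one sign-extends back to the right value, and anything else has to be staged through rax with a register-to-memory mov. A small standalone sketch of the same classification in plain C++ (no xbyak), useful for sanity-checking which branch a given constant takes:

#include <cstdint>
#include <cstdio>

// Mirrors MovMem64's branch selection.
const char* MovMem64Branch(uint64_t v) {
  if ((v & ~0x7FFFFFFFull) == 0) {
    return "imm32 (upper 33 bits clear)";
  } else if ((v & ~0x7FFFFFFFull) == ~0x7FFFFFFFull) {
    return "imm32 (upper 33 bits set; sign-extends back correctly)";
  } else {
    return "mov rax, imm64 + mov [mem], rax";
  }
}

int main() {
  const uint64_t samples[] = {
      0x0000000000001000ull,  // small positive      -> imm32
      0xFFFFFFFFFFFFFFF0ull,  // small negative      -> imm32
      0x0000000080000000ull,  // bit 31 set, rest 0  -> needs rax (imm32 would sign-extend wrongly)
      0x123456789ABCDEF0ull,  // full 64-bit value   -> needs rax
  };
  for (uint64_t v : samples) {
    printf("%016llX -> %s\n", (unsigned long long)v, MovMem64Branch(v));
  }
  return 0;
}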