[a64] Fix instruction constant generation

Fixes some offset generation as well
Wunkolo 2024-05-09 06:29:55 -07:00
parent dc6666d4d2
commit 6e83e2a42d
2 changed files with 50 additions and 16 deletions
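Background for the constant changes below: AArch64 ALU instructions such as CMP/ADD/SUB only encode a 12-bit unsigned immediate, optionally shifted left by 12 bits, so values like 0xE0000000 or arbitrary 32-bit constants cannot be used as immediates and have to be materialized into a scratch register first, then used via the register-register form. A minimal standalone sketch of that encodability rule (FitsArithImmediate is an illustrative helper, not part of the backend):

#include <cstdint>

// True if `value` fits the AArch64 arithmetic-immediate encoding
// (12 bits, optionally shifted left by 12), i.e. CMP/ADD/SUB could take it
// directly without a scratch register.
constexpr bool FitsArithImmediate(uint64_t value) {
  return (value & ~uint64_t{0xFFF}) == 0 ||        // imm12, LSL #0
         (value & ~(uint64_t{0xFFF} << 12)) == 0;  // imm12, LSL #12
}

static_assert(FitsArithImmediate(0xFFF));        // usable as an immediate
static_assert(FitsArithImmediate(0xABC000));     // usable with LSL #12
static_assert(!FitsArithImmediate(0xE0000000));  // needs a register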

@@ -60,10 +60,10 @@ XReg ComputeMemoryAddressOffset(A64Emitter& e, const T& guest, const T& offset,
// TODO(benvanik): find a way to avoid doing this.
e.MOV(W0, guest.reg().toW());
}
- e.ADD(address_register.toX(), e.GetMembaseReg(), X0);
- e.MOV(X1, offset_const);
- e.ADD(X0, X0, X1);
+ e.MOV(X0, offset_const);
+ e.ADD(address_register.toX(), address_register.toX(), X0);
+ e.ADD(address_register.toX(), e.GetMembaseReg(), X0);
return address_register.toX();
}
}
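ComputeMemoryAddressOffset turns a guest address plus a constant offset into a host pointer: membase + guest + offset, with the atomic sequences below additionally showing the 0x1000 bump applied to guest addresses at or above 0xE0000000 when the host allocation granularity is coarser than 4 KiB. A plain C++ model of that combined address math, with illustrative names rather than the emitter's API:

#include <cstdint>

// Model of the address computation: `membase` is the 64-bit host base,
// `guest` the 32-bit guest address, `offset` the constant from the IR op.
uint64_t HostAddress(uint64_t membase, uint32_t guest, int32_t offset,
                     bool coarse_granularity) {
  uint64_t adjusted = guest;
  if (coarse_granularity && guest >= 0xE0000000u) {
    // Emulate the 4 KB physical address offset in 0xE0000000+ when it cannot
    // be provided by the memory mapping itself.
    adjusted += 0x1000;
  }
  return membase + adjusted + static_cast<int64_t>(offset);
}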
@@ -189,14 +189,15 @@ struct ATOMIC_COMPARE_EXCHANGE_I32
if (xe::memory::allocation_granularity() > 0x1000) {
// Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
// it via memory mapping.
- e.CMP(i.src1.reg(), 0xE0, LSL, 24);
+ e.MOV(W3, 0xE0000000);
+ e.CMP(i.src1.reg(), X3);
e.CSET(W1, Cond::HS);
e.LSL(W1, W1, 12);
e.ADD(W1, W1, i.src1.reg().toW());
} else {
e.MOV(W1, i.src1.reg().toW());
}
- e.ADD(W1, e.GetMembaseReg().toW(), W1);
+ e.ADD(X1, e.GetMembaseReg(), X1);
// if([C] == A) [C] = B
// else A = [C]
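The "if([C] == A) [C] = B else A = [C]" comment is exactly the semantics of a strong compare-and-exchange; expressed with std::atomic it corresponds to something like the following (illustrative wrapper, not the code the sequence emits):

#include <atomic>
#include <cstdint>

// if (*mem == expected) { *mem = desired; return true; }
// else { expected = *mem; return false; }
bool CompareExchange32(std::atomic<uint32_t>* mem, uint32_t& expected,
                       uint32_t desired) {
  return mem->compare_exchange_strong(expected, desired);
}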
@@ -215,14 +216,15 @@ struct ATOMIC_COMPARE_EXCHANGE_I64
if (xe::memory::allocation_granularity() > 0x1000) {
// Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
// it via memory mapping.
- e.CMP(i.src1.reg(), 0xE0, LSL, 24);
+ e.MOV(W3, 0xE0000000);
+ e.CMP(i.src1.reg(), X3);
e.CSET(W1, Cond::HS);
e.LSL(W1, W1, 12);
e.ADD(W1, W1, i.src1.reg().toW());
} else {
e.MOV(W1, i.src1.reg().toW());
}
- e.ADD(W1, e.GetMembaseReg().toW(), W1);
+ e.ADD(X1, e.GetMembaseReg(), X1);
// if([C] == A) [C] = B
// else A = [C]
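Aside from the 0xE0000000 comparison, both atomic sequences also switch the membase add from W registers to X registers. A W-register ADD reads and writes only the low 32 bits (zeroing the upper half of the destination), so adding the guest offset to GetMembaseReg().toW() would truncate a 64-bit host base; the X-register form keeps the full pointer. A plain C++ illustration of the difference (not emitter code):

#include <cstdint>

// W-register style add: only the low 32 bits of the base survive.
uint64_t AddTruncated(uint64_t membase, uint32_t guest) {
  return static_cast<uint32_t>(membase) + guest;  // wraps at 4 GiB
}

// X-register style add, as in the fixed code: full 64-bit host address.
uint64_t AddFull(uint64_t membase, uint32_t guest) {
  return membase + guest;
}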

@@ -872,7 +872,8 @@ struct COMPARE_EQ_I8
EmitCommutativeCompareOp(
e, i, [](A64Emitter& e, WReg src1, WReg src2) { e.CMP(src1, src2); },
[](A64Emitter& e, WReg src1, int32_t constant) {
- e.CMP(src1, constant);
+ e.MOV(W1, constant);
+ e.CMP(src1, W1);
});
e.CSET(i.dest, Cond::EQ);
}
@@ -883,7 +884,8 @@ struct COMPARE_EQ_I16
EmitCommutativeCompareOp(
e, i, [](A64Emitter& e, WReg src1, WReg src2) { e.CMP(src1, src2); },
[](A64Emitter& e, WReg src1, int32_t constant) {
- e.CMP(src1, constant);
+ e.MOV(W1, constant);
+ e.CMP(src1, W1);
});
e.CSET(i.dest, Cond::EQ);
}
@@ -947,7 +949,8 @@ struct COMPARE_NE_I8
EmitCommutativeCompareOp(
e, i, [](A64Emitter& e, WReg src1, WReg src2) { e.CMP(src1, src2); },
[](A64Emitter& e, WReg src1, int32_t constant) {
- e.CMP(src1, constant);
+ e.MOV(W1, constant);
+ e.CMP(src1, W1);
});
e.CSET(i.dest, Cond::NE);
}
@@ -958,7 +961,8 @@ struct COMPARE_NE_I16
EmitCommutativeCompareOp(
e, i, [](A64Emitter& e, WReg src1, WReg src2) { e.CMP(src1, src2); },
[](A64Emitter& e, WReg src1, int32_t constant) {
- e.CMP(src1, constant);
+ e.MOV(W1, constant);
+ e.CMP(src1, W1);
});
e.CSET(i.dest, Cond::NE);
}
@@ -969,7 +973,8 @@ struct COMPARE_NE_I32
EmitCommutativeCompareOp(
e, i, [](A64Emitter& e, WReg src1, WReg src2) { e.CMP(src1, src2); },
[](A64Emitter& e, WReg src1, int32_t constant) {
- e.CMP(src1, constant);
+ e.MOV(W1, constant);
+ e.CMP(src1, W1);
});
e.CSET(i.dest, Cond::NE);
}
@@ -980,7 +985,8 @@ struct COMPARE_NE_I64
EmitCommutativeCompareOp(
e, i, [](A64Emitter& e, XReg src1, XReg src2) { e.CMP(src1, src2); },
[](A64Emitter& e, XReg src1, int32_t constant) {
- e.CMP(src1, constant);
+ e.MOV(X1, constant);
+ e.CMP(src1, X1);
});
e.CSET(i.dest, Cond::NE);
}
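All of the compare fixes follow the same recipe: materialize the constant into a scratch register (W1/X1), then compare register to register. An arbitrary 32-bit constant can always be built from at most two 16-bit pieces; a rough sketch of that decomposition, independent of the emitter (real assemblers also use MOVN or logical immediates to shorten some cases):

#include <cstdint>
#include <cstdio>

// Print one straightforward MOVZ/MOVK sequence that loads `value` into w1.
void PrintMovSequence(uint32_t value) {
  unsigned lo = value & 0xFFFFu;
  unsigned hi = value >> 16;
  std::printf("movz w1, #0x%x\n", lo);  // low 16 bits, rest cleared
  if (hi != 0) {
    std::printf("movk w1, #0x%x, lsl #16\n", hi);  // insert the high 16 bits
  }
}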
@@ -2610,7 +2616,7 @@ void EmitRotateLeftXX(A64Emitter& e, const ARGS& i) {
if (i.src2.is_constant) {
e.MOV(REG(1), i.src2.constant());
} else {
- e.MOV(W0, i.src2.reg().toW());
+ e.MOV(W1, i.src2.reg().toW());
}
e.LSLV(i.dest, REG(0), REG(1));
@@ -2633,13 +2639,39 @@ struct ROTATE_LEFT_I16
struct ROTATE_LEFT_I32
: Sequence<ROTATE_LEFT_I32, I<OPCODE_ROTATE_LEFT, I32Op, I32Op, I8Op>> {
static void Emit(A64Emitter& e, const EmitArgType& i) {
- EmitRotateLeftXX<ROTATE_LEFT_I32, WReg>(e, i);
+ if (i.src1.is_constant) {
+ e.MOV(W0, i.src1.constant());
+ } else {
+ e.MOV(W0, i.src1.reg());
+ }
+ if (i.src2.is_constant) {
+ e.MOV(W1, i.src2.constant());
+ } else {
+ e.SXTB(W1, i.src2.reg());
+ }
+ e.NEG(W1, W1);
+ e.ROR(i.dest, W0, W1);
}
};
struct ROTATE_LEFT_I64
: Sequence<ROTATE_LEFT_I64, I<OPCODE_ROTATE_LEFT, I64Op, I64Op, I8Op>> {
static void Emit(A64Emitter& e, const EmitArgType& i) {
- EmitRotateLeftXX<ROTATE_LEFT_I64, XReg>(e, i);
+ if (i.src1.is_constant) {
+ e.MOV(X0, i.src1.constant());
+ } else {
+ e.MOV(X0, i.src1.reg());
+ }
+ if (i.src2.is_constant) {
+ e.MOV(X1, i.src2.constant());
+ } else {
+ e.SXTB(X1, i.src2.reg().toW());
+ }
+ e.NEG(X1, X1);
+ e.ROR(i.dest, X0, X1);
}
};
EMITTER_OPCODE_TABLE(OPCODE_ROTATE_LEFT, ROTATE_LEFT_I8, ROTATE_LEFT_I16,
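ROTATE_LEFT_I32/I64 now lower the left rotate to NEG followed by ROR, i.e. the identity rotl(x, n) == rotr(x, (-n) mod width), since AArch64 only provides a rotate-right data-processing form. A standalone check of that identity (C++20 <bit> used only for the reference value; names are illustrative):

#include <bit>
#include <cassert>
#include <cstdint>

// Left rotate expressed through a right rotate with a negated amount,
// mirroring the emitted NEG + ROR pair.
uint32_t RotlViaRor(uint32_t x, unsigned n) {
  unsigned amount = (0u - n) & 31u;  // (-n) mod 32, what NEG feeds into ROR
  return (x >> amount) | (x << ((32u - amount) & 31u));
}

int main() {
  for (unsigned n = 0; n < 32; ++n) {
    assert(RotlViaRor(0x80000001u, n) ==
           std::rotl(0x80000001u, static_cast<int>(n)));
  }
  return 0;
}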