x64ABI: enhance MOVTwo to take an offset
This lets us merge displacements into MOVTwo in trampolines.
parent 2a8936312e
commit 53b44ccb3a
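Context for the diff below: MOVTwo copies two source registers into two destination registers; the new offset1 argument lets it also fold a displacement into the first destination, choosing XCHG+ADD, LEA, MOV, or ADD depending on how the registers alias. A minimal standalone sketch of that selection logic follows (illustrative only, not Dolphin's emitter; the Reg enum, the MovTwoPlan helper, and the string output are hypothetical stand-ins for the real XEmitter calls):

// Hypothetical, self-contained model of the updated MOVTwo's instruction
// selection: dst1 <- src1 + offset1 and dst2 <- src2, without clobbering
// either source before it is read.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

enum Reg { RCX, RDX, RSI, RDI };  // illustrative subset of x64 registers

std::vector<std::string> MovTwoPlan(Reg dst1, Reg src1, int32_t offset1,
                                    Reg dst2, Reg src2)
{
    std::vector<std::string> ops;
    if (dst1 == src2 && dst2 == src1)
    {
        // The two copies form a swap: one XCHG, then fold the offset in.
        ops.push_back("XCHG dst1, dst2");
        if (offset1)
            ops.push_back("ADD dst1, offset1");
    }
    else if (src2 != dst1)
    {
        // Writing dst1 first is safe; LEA does copy + add in one instruction.
        if (dst1 != src1 && offset1)
            ops.push_back("LEA dst1, [src1 + offset1]");
        else if (dst1 != src1)
            ops.push_back("MOV dst1, src1");
        else if (offset1)
            ops.push_back("ADD dst1, offset1");
        if (dst2 != src2)
            ops.push_back("MOV dst2, src2");
    }
    else
    {
        // dst1 aliases src2, so dst2 has to be written first.
        if (dst2 != src2)
            ops.push_back("MOV dst2, src2");
        if (dst1 != src1 && offset1)
            ops.push_back("LEA dst1, [src1 + offset1]");
        else if (dst1 != src1)
            ops.push_back("MOV dst1, src1");
        else if (offset1)
            ops.push_back("ADD dst1, offset1");
    }
    return ops;
}

int main()
{
    // Roughly what a read trampoline wants: param1 <- addr + displacement, param2 <- data.
    for (const auto& op : MovTwoPlan(RCX, RSI, 0x10, RDX, RDI))
        std::cout << op << '\n';  // prints the LEA line, then the MOV line
}

The LEA path is the interesting one: it merges the register copy and the displacement add into a single instruction, which is what lets the trampolines below drop their separate ADD of info.displacement.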
@@ -181,20 +181,26 @@ void XEmitter::ABI_CallFunctionR(const void *func, X64Reg reg1)
 // Pass two registers as parameters.
 void XEmitter::ABI_CallFunctionRR(const void *func, X64Reg reg1, X64Reg reg2)
 {
-    MOVTwo(64, ABI_PARAM1, reg1, ABI_PARAM2, reg2);
+    MOVTwo(64, ABI_PARAM1, reg1, 0, ABI_PARAM2, reg2);
     ABI_CallFunction(func);
 }
 
-void XEmitter::MOVTwo(int bits, Gen::X64Reg dst1, Gen::X64Reg src1, Gen::X64Reg dst2, Gen::X64Reg src2)
+void XEmitter::MOVTwo(int bits, Gen::X64Reg dst1, Gen::X64Reg src1, s32 offset1, Gen::X64Reg dst2, Gen::X64Reg src2)
 {
     if (dst1 == src2 && dst2 == src1)
     {
         XCHG(bits, R(src1), R(src2));
+        if (offset1)
+            ADD(bits, R(dst1), Imm32(offset1));
     }
     else if (src2 != dst1)
     {
-        if (dst1 != src1)
+        if (dst1 != src1 && offset1)
+            LEA(bits, dst1, MDisp(src1, offset1));
+        else if (dst1 != src1)
             MOV(bits, R(dst1), R(src1));
+        else if (offset1)
+            ADD(bits, R(dst1), Imm32(offset1));
         if (dst2 != src2)
             MOV(bits, R(dst2), R(src2));
     }
@@ -202,8 +208,12 @@ void XEmitter::MOVTwo(int bits, Gen::X64Reg dst1, Gen::X64Reg src1, Gen::X64Reg
     {
         if (dst2 != src2)
             MOV(bits, R(dst2), R(src2));
-        if (dst1 != src1)
+        if (dst1 != src1 && offset1)
+            LEA(bits, dst1, MDisp(src1, offset1));
+        else if (dst1 != src1)
             MOV(bits, R(dst1), R(src1));
+        else if (offset1)
+            ADD(bits, R(dst1), Imm32(offset1));
     }
 }
 
@@ -888,7 +888,7 @@ public:
     void ABI_CallFunctionRR(const void *func, X64Reg reg1, X64Reg reg2);
 
     // Helper method for the above, or can be used separately.
-    void MOVTwo(int bits, Gen::X64Reg dst1, Gen::X64Reg src1, Gen::X64Reg dst2, Gen::X64Reg src2);
+    void MOVTwo(int bits, Gen::X64Reg dst1, Gen::X64Reg src1, s32 offset, Gen::X64Reg dst2, Gen::X64Reg src2);
 
     // Saves/restores the registers and adjusts the stack to be aligned as
     // required by the ABI, where the previous alignment was as specified.
@@ -48,10 +48,7 @@ const u8* TrampolineCache::GenerateReadTrampoline(const InstructionInfo &info, B
     ABI_PushRegistersAndAdjustStack(registersInUse, 0);
 
     int dataRegSize = info.operandSize == 8 ? 64 : 32;
-    MOVTwo(dataRegSize, ABI_PARAM1, addrReg, ABI_PARAM2, dataReg);
-
-    if (info.displacement)
-        ADD(32, R(ABI_PARAM1), Imm32(info.displacement));
+    MOVTwo(dataRegSize, ABI_PARAM1, addrReg, info.displacement, ABI_PARAM2, dataReg);
 
     switch (info.operandSize)
     {
@@ -102,8 +99,13 @@ const u8* TrampolineCache::GenerateWriteTrampoline(const InstructionInfo &info,
 
     if (info.hasImmediate)
     {
-        if (addrReg != ABI_PARAM2)
-            MOV(64, R(ABI_PARAM2), R(addrReg));
+        if (addrReg != ABI_PARAM2 && info.displacement)
+            LEA(32, ABI_PARAM2, MDisp(addrReg, info.displacement));
+        else if (addrReg != ABI_PARAM2)
+            MOV(32, R(ABI_PARAM2), R(addrReg));
+        else if (info.displacement)
+            ADD(32, R(ABI_PARAM2), Imm32(info.displacement));
+
         // we have to swap back the immediate to pass it to the write functions
         switch (info.operandSize)
         {
@@ -123,11 +125,8 @@ const u8* TrampolineCache::GenerateWriteTrampoline(const InstructionInfo &info,
     }
     else
     {
-        MOVTwo(64, ABI_PARAM1, dataReg, ABI_PARAM2, addrReg);
-    }
-    if (info.displacement)
-    {
-        ADD(32, R(ABI_PARAM2), Imm32(info.displacement));
+        int dataRegSize = info.operandSize == 8 ? 64 : 32;
+        MOVTwo(dataRegSize, ABI_PARAM2, addrReg, info.displacement, ABI_PARAM1, dataReg);
     }
 
     switch (info.operandSize)