Merge pull request #428 from Sonicadvance1/x86_32-removal
Remove x86_32 support from Dolphin.
This commit is contained in:
commit
4c42b38de1
|
@ -148,14 +148,11 @@ inline u64 _rotr64(u64 x, unsigned int shift){
|
|||
#define fstat64 _fstat64
|
||||
#define fileno _fileno
|
||||
|
||||
#if _M_X86_32
|
||||
#define Crash() {__asm int 3}
|
||||
#else
|
||||
extern "C" {
|
||||
extern "C"
|
||||
{
|
||||
__declspec(dllimport) void __stdcall DebugBreak(void);
|
||||
}
|
||||
#define Crash() {DebugBreak();}
|
||||
#endif // M_IX86
|
||||
#define Crash() {DebugBreak();}
|
||||
#endif // WIN32 ndef
|
||||
|
||||
// Generic function to get last error message.
|
||||
|
|
|
@ -152,14 +152,8 @@ static BOOL GetFunctionInfoFromAddresses( ULONG fnAddress, ULONG stackAddress, L
|
|||
_tcscpy( lpszSymbol, _T("?") );
|
||||
|
||||
// Get symbol info for IP
|
||||
#if _M_X86_32
|
||||
DWORD dwDisp = 0;
|
||||
if ( SymGetSymFromAddr( GetCurrentProcess(), (ULONG)fnAddress, &dwDisp, pSym ) )
|
||||
#else
|
||||
//makes it compile but hell im not sure if this works...
|
||||
DWORD64 dwDisp = 0;
|
||||
if ( SymGetSymFromAddr( GetCurrentProcess(), (ULONG)fnAddress, (PDWORD64)&dwDisp, pSym ) )
|
||||
#endif
|
||||
{
|
||||
// Make the symbol readable for humans
|
||||
UnDecorateSymbolName( pSym->Name, lpszNonUnicodeUnDSymbol, BUFFERSIZE,
|
||||
|
@ -313,15 +307,9 @@ void StackTrace( HANDLE hThread, const char* lpszMessage, FILE *file )
|
|||
}
|
||||
|
||||
::ZeroMemory( &callStack, sizeof(callStack) );
|
||||
#if _M_X86_32
|
||||
callStack.AddrPC.Offset = context.Eip;
|
||||
callStack.AddrStack.Offset = context.Esp;
|
||||
callStack.AddrFrame.Offset = context.Ebp;
|
||||
#else
|
||||
callStack.AddrPC.Offset = context.Rip;
|
||||
callStack.AddrStack.Offset = context.Rsp;
|
||||
callStack.AddrFrame.Offset = context.Rbp;
|
||||
#endif
|
||||
callStack.AddrPC.Mode = AddrModeFlat;
|
||||
callStack.AddrStack.Mode = AddrModeFlat;
|
||||
callStack.AddrFrame.Mode = AddrModeFlat;
|
||||
|
|
|
@ -40,39 +40,25 @@
|
|||
#define USE_RVALUE_REFERENCES
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32) && _M_X86_64
|
||||
#define USE_CONDITION_VARIABLES
|
||||
#elif defined(_WIN32)
|
||||
#define USE_EVENTS
|
||||
#endif
|
||||
|
||||
namespace std
|
||||
{
|
||||
|
||||
class condition_variable
|
||||
{
|
||||
#if defined(_WIN32) && defined(USE_CONDITION_VARIABLES)
|
||||
#if defined(_WIN32)
|
||||
typedef CONDITION_VARIABLE native_type;
|
||||
#elif defined(_WIN32)
|
||||
typedef HANDLE native_type;
|
||||
#else
|
||||
typedef pthread_cond_t native_type;
|
||||
#endif
|
||||
|
||||
public:
|
||||
|
||||
#ifdef USE_EVENTS
|
||||
typedef native_type native_handle_type;
|
||||
#else
|
||||
typedef native_type* native_handle_type;
|
||||
#endif
|
||||
|
||||
condition_variable()
|
||||
{
|
||||
#if defined(_WIN32) && defined(USE_CONDITION_VARIABLES)
|
||||
#if defined(_WIN32)
|
||||
InitializeConditionVariable(&m_handle);
|
||||
#elif defined(_WIN32)
|
||||
m_handle = CreateEvent(nullptr, false, false, nullptr);
|
||||
#else
|
||||
pthread_cond_init(&m_handle, nullptr);
|
||||
#endif
|
||||
|
@ -80,9 +66,7 @@ public:
|
|||
|
||||
~condition_variable()
|
||||
{
|
||||
#if defined(_WIN32) && !defined(USE_CONDITION_VARIABLES)
|
||||
CloseHandle(m_handle);
|
||||
#elif !defined(_WIN32)
|
||||
#ifndef _WIN32
|
||||
pthread_cond_destroy(&m_handle);
|
||||
#endif
|
||||
}
|
||||
|
@ -92,10 +76,8 @@ public:
|
|||
|
||||
void notify_one()
|
||||
{
|
||||
#if defined(_WIN32) && defined(USE_CONDITION_VARIABLES)
|
||||
#if defined(_WIN32)
|
||||
WakeConditionVariable(&m_handle);
|
||||
#elif defined(_WIN32)
|
||||
SetEvent(m_handle);
|
||||
#else
|
||||
pthread_cond_signal(&m_handle);
|
||||
#endif
|
||||
|
@ -103,11 +85,8 @@ public:
|
|||
|
||||
void notify_all()
|
||||
{
|
||||
#if defined(_WIN32) && defined(USE_CONDITION_VARIABLES)
|
||||
#if defined(_WIN32)
|
||||
WakeAllConditionVariable(&m_handle);
|
||||
#elif defined(_WIN32)
|
||||
// TODO: broken
|
||||
SetEvent(m_handle);
|
||||
#else
|
||||
pthread_cond_broadcast(&m_handle);
|
||||
#endif
|
||||
|
@ -116,16 +95,7 @@ public:
|
|||
void wait(unique_lock<mutex>& lock)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
#ifdef USE_SRWLOCKS
|
||||
SleepConditionVariableSRW(&m_handle, lock.mutex()->native_handle(), INFINITE, 0);
|
||||
#elif defined(USE_CONDITION_VARIABLES)
|
||||
SleepConditionVariableCS(&m_handle, lock.mutex()->native_handle(), INFINITE);
|
||||
#else
|
||||
// TODO: broken, the unlock and wait need to be atomic
|
||||
lock.unlock();
|
||||
WaitForSingleObject(m_handle, INFINITE);
|
||||
lock.lock();
|
||||
#endif
|
||||
#else
|
||||
pthread_cond_wait(&m_handle, lock.mutex()->native_handle());
|
||||
#endif
|
||||
|
@ -158,11 +128,7 @@ public:
|
|||
|
||||
native_handle_type native_handle()
|
||||
{
|
||||
#ifdef USE_EVENTS
|
||||
return m_handle;
|
||||
#else
|
||||
return &m_handle;
|
||||
#endif
|
||||
}
|
||||
|
||||
private:
|
||||
|
|
|
@ -44,10 +44,6 @@
|
|||
#define USE_RVALUE_REFERENCES
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32) && _M_X86_64
|
||||
#define USE_SRWLOCKS
|
||||
#endif
|
||||
|
||||
namespace std
|
||||
{
|
||||
|
||||
|
@ -122,7 +118,6 @@ private:
|
|||
native_type m_handle;
|
||||
};
|
||||
|
||||
#if !defined(_WIN32) || defined(USE_SRWLOCKS)
|
||||
|
||||
class mutex
|
||||
{
|
||||
|
@ -193,11 +188,6 @@ private:
|
|||
native_type m_handle;
|
||||
};
|
||||
|
||||
#else
|
||||
typedef recursive_mutex mutex; // just use CriticalSections
|
||||
|
||||
#endif
|
||||
|
||||
enum defer_lock_t { defer_lock };
|
||||
enum try_to_lock_t { try_to_lock };
|
||||
enum adopt_lock_t { adopt_lock };
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
// Licensed under GPLv2
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#if defined(_WIN32) && defined(_M_X86_64)
|
||||
#if defined(_WIN32)
|
||||
|
||||
#include <math.h>
|
||||
#include <Windows.h>
|
||||
|
|
|
@ -11,26 +11,7 @@ using namespace Gen;
|
|||
// Shared code between Win64 and Unix64
|
||||
|
||||
unsigned int XEmitter::ABI_GetAlignedFrameSize(unsigned int frameSize, bool noProlog) {
|
||||
// On platforms other than Windows 32-bit: At the beginning of a function,
|
||||
// the stack pointer is 4/8 bytes less than a multiple of 16; however, the
|
||||
// function prolog immediately subtracts an appropriate amount to align
|
||||
// it, so no alignment is required around a call.
|
||||
// In the functions generated by ThunkManager::ProtectFunction and some
|
||||
// others, we add the necessary subtraction (and 0x20 bytes shadow space
|
||||
// for Win64) into this rather than having a separate prolog.
|
||||
// On Windows 32-bit, the required alignment is only 4 bytes, so we just
|
||||
// ensure that the frame size isn't misaligned.
|
||||
#if _M_X86_64
|
||||
// expect frameSize == 0
|
||||
frameSize = noProlog ? 0x28 : 0;
|
||||
#elif defined(_WIN32)
|
||||
frameSize = (frameSize + 3) & -4;
|
||||
#else
|
||||
unsigned int existingAlignment = noProlog ? 0xc : 0;
|
||||
frameSize -= existingAlignment;
|
||||
frameSize = (frameSize + 15) & -16;
|
||||
frameSize += existingAlignment;
|
||||
#endif
|
||||
return frameSize;
|
||||
}
|
||||
|
||||
|
@ -38,35 +19,22 @@ void XEmitter::ABI_AlignStack(unsigned int frameSize, bool noProlog) {
|
|||
unsigned int fillSize =
|
||||
ABI_GetAlignedFrameSize(frameSize, noProlog) - frameSize;
|
||||
if (fillSize != 0) {
|
||||
#if _M_X86_64
|
||||
SUB(64, R(RSP), Imm8(fillSize));
|
||||
#else
|
||||
SUB(32, R(ESP), Imm8(fillSize));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
void XEmitter::ABI_RestoreStack(unsigned int frameSize, bool noProlog) {
|
||||
unsigned int alignedSize = ABI_GetAlignedFrameSize(frameSize, noProlog);
|
||||
if (alignedSize != 0) {
|
||||
#if _M_X86_64
|
||||
ADD(64, R(RSP), Imm8(alignedSize));
|
||||
#else
|
||||
ADD(32, R(ESP), Imm8(alignedSize));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
void XEmitter::ABI_PushRegistersAndAdjustStack(u32 mask, bool noProlog)
|
||||
{
|
||||
int regSize =
|
||||
#if _M_X86_64
|
||||
8;
|
||||
#else
|
||||
4;
|
||||
#endif
|
||||
int regSize = 8;
|
||||
int shadow = 0;
|
||||
#if defined(_WIN32) && _M_X86_64
|
||||
#if defined(_WIN32)
|
||||
shadow = 0x20;
|
||||
#endif
|
||||
int count = 0;
|
||||
|
@ -100,14 +68,9 @@ void XEmitter::ABI_PushRegistersAndAdjustStack(u32 mask, bool noProlog)
|
|||
|
||||
void XEmitter::ABI_PopRegistersAndAdjustStack(u32 mask, bool noProlog)
|
||||
{
|
||||
int regSize =
|
||||
#if _M_X86_64
|
||||
8;
|
||||
#else
|
||||
4;
|
||||
#endif
|
||||
int regSize = 8;
|
||||
int size = 0;
|
||||
#if defined(_WIN32) && _M_X86_64
|
||||
#if defined(_WIN32)
|
||||
size += 0x20;
|
||||
#endif
|
||||
for (int x = 0; x < 16; x++)
|
||||
|
@ -137,152 +100,6 @@ void XEmitter::ABI_PopRegistersAndAdjustStack(u32 mask, bool noProlog)
|
|||
}
|
||||
}
|
||||
|
||||
#if _M_X86_32 // All32
|
||||
|
||||
// Shared code between Win32 and Unix32
|
||||
void XEmitter::ABI_CallFunction(void *func) {
|
||||
ABI_AlignStack(0);
|
||||
CALL(func);
|
||||
ABI_RestoreStack(0);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionC16(void *func, u16 param1) {
|
||||
ABI_AlignStack(1 * 2);
|
||||
PUSH(16, Imm16(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(1 * 2);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionCC16(void *func, u32 param1, u16 param2) {
|
||||
ABI_AlignStack(1 * 2 + 1 * 4);
|
||||
PUSH(16, Imm16(param2));
|
||||
PUSH(32, Imm32(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(1 * 2 + 1 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionC(void *func, u32 param1) {
|
||||
ABI_AlignStack(1 * 4);
|
||||
PUSH(32, Imm32(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(1 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionCC(void *func, u32 param1, u32 param2) {
|
||||
ABI_AlignStack(2 * 4);
|
||||
PUSH(32, Imm32(param2));
|
||||
PUSH(32, Imm32(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(2 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionCP(void *func, u32 param1, void *param2) {
|
||||
ABI_AlignStack(2 * 4);
|
||||
PUSH(32, Imm32((u32)param2));
|
||||
PUSH(32, Imm32(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(2 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionCCC(void *func, u32 param1, u32 param2, u32 param3) {
|
||||
ABI_AlignStack(3 * 4);
|
||||
PUSH(32, Imm32(param3));
|
||||
PUSH(32, Imm32(param2));
|
||||
PUSH(32, Imm32(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(3 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionCCP(void *func, u32 param1, u32 param2, void *param3) {
|
||||
ABI_AlignStack(3 * 4);
|
||||
PUSH(32, Imm32((u32)param3));
|
||||
PUSH(32, Imm32(param2));
|
||||
PUSH(32, Imm32(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(3 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionCCCP(void *func, u32 param1, u32 param2,u32 param3, void *param4) {
|
||||
ABI_AlignStack(4 * 4);
|
||||
PUSH(32, Imm32((u32)param4));
|
||||
PUSH(32, Imm32(param3));
|
||||
PUSH(32, Imm32(param2));
|
||||
PUSH(32, Imm32(param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(4 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionPC(void *func, void *param1, u32 param2) {
|
||||
ABI_AlignStack(2 * 4);
|
||||
PUSH(32, Imm32(param2));
|
||||
PUSH(32, Imm32((u32)param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(2 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionPPC(void *func, void *param1, void *param2,u32 param3) {
|
||||
ABI_AlignStack(3 * 4);
|
||||
PUSH(32, Imm32(param3));
|
||||
PUSH(32, Imm32((u32)param2));
|
||||
PUSH(32, Imm32((u32)param1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(3 * 4);
|
||||
}
|
||||
|
||||
// Pass a register as a parameter.
|
||||
void XEmitter::ABI_CallFunctionR(void *func, X64Reg reg1) {
|
||||
ABI_AlignStack(1 * 4);
|
||||
PUSH(32, R(reg1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(1 * 4);
|
||||
}
|
||||
|
||||
// Pass two registers as parameters.
|
||||
void XEmitter::ABI_CallFunctionRR(void *func, Gen::X64Reg reg1, Gen::X64Reg reg2, bool noProlog)
|
||||
{
|
||||
ABI_AlignStack(2 * 4, noProlog);
|
||||
PUSH(32, R(reg2));
|
||||
PUSH(32, R(reg1));
|
||||
CALL(func);
|
||||
ABI_RestoreStack(2 * 4, noProlog);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionAC(void *func, const Gen::OpArg &arg1, u32 param2)
|
||||
{
|
||||
ABI_AlignStack(2 * 4);
|
||||
PUSH(32, Imm32(param2));
|
||||
PUSH(32, arg1);
|
||||
CALL(func);
|
||||
ABI_RestoreStack(2 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_CallFunctionA(void *func, const Gen::OpArg &arg1)
|
||||
{
|
||||
ABI_AlignStack(1 * 4);
|
||||
PUSH(32, arg1);
|
||||
CALL(func);
|
||||
ABI_RestoreStack(1 * 4);
|
||||
}
|
||||
|
||||
void XEmitter::ABI_PushAllCalleeSavedRegsAndAdjustStack() {
|
||||
PUSH(EBP);
|
||||
MOV(32, R(EBP), R(ESP));
|
||||
PUSH(EBX);
|
||||
PUSH(ESI);
|
||||
PUSH(EDI);
|
||||
SUB(32, R(ESP), Imm8(0xc));
|
||||
}
|
||||
|
||||
void XEmitter::ABI_PopAllCalleeSavedRegsAndAdjustStack() {
|
||||
ADD(32, R(ESP), Imm8(0xc));
|
||||
POP(EDI);
|
||||
POP(ESI);
|
||||
POP(EBX);
|
||||
POP(EBP);
|
||||
}
|
||||
|
||||
#else //64bit
|
||||
|
||||
// Common functions
|
||||
void XEmitter::ABI_CallFunction(void *func) {
|
||||
ABI_AlignStack(0);
|
||||
|
@ -643,6 +460,3 @@ void XEmitter::ABI_PopAllCalleeSavedRegsAndAdjustStack() {
|
|||
|
||||
#endif // WIN32
|
||||
|
||||
#endif // 32bit
|
||||
|
||||
|
||||
|
|
|
@ -6,17 +6,9 @@
|
|||
|
||||
#include "Common/x64Emitter.h"
|
||||
|
||||
// x86/x64 ABI:s, and helpers to help follow them when JIT-ing code.
|
||||
// x64 ABI:s, and helpers to help follow them when JIT-ing code.
|
||||
// All convensions return values in EAX (+ possibly EDX).
|
||||
|
||||
// Linux 32-bit, Windows 32-bit (cdecl, System V):
|
||||
// * Caller pushes left to right
|
||||
// * Caller fixes stack after call
|
||||
// * function subtract from stack for local storage only.
|
||||
// Scratch: EAX ECX EDX
|
||||
// Callee-save: EBX ESI EDI EBP
|
||||
// Parameters: -
|
||||
|
||||
// Windows 64-bit
|
||||
// * 4-reg "fastcall" variant, very new-skool stack handling
|
||||
// * Callee moves stack pointer, to make room for shadow regs for the biggest function _it itself calls_
|
||||
|
@ -31,22 +23,6 @@
|
|||
// Callee-save: RBX RBP R12 R13 R14 R15
|
||||
// Parameters: RDI RSI RDX RCX R8 R9
|
||||
|
||||
#if _M_X86_32 // 32 bit calling convention, shared by all
|
||||
|
||||
// 32-bit don't pass parameters in regs, but these are convenient to have anyway when we have to
|
||||
// choose regs to put stuff in.
|
||||
#define ABI_PARAM1 RCX
|
||||
#define ABI_PARAM2 RDX
|
||||
|
||||
// There are no ABI_PARAM* here, since args are pushed.
|
||||
// 32-bit bog standard cdecl, shared between linux and windows
|
||||
// MacOSX 32-bit is same as System V with a few exceptions that we probably don't care much about.
|
||||
|
||||
#define ABI_ALL_CALLEE_SAVED ((1 << EAX) | (1 << ECX) | (1 << EDX) | \
|
||||
0xff00 /* xmm0..7 */)
|
||||
|
||||
#else // 64 bit calling convention
|
||||
|
||||
#ifdef _WIN32 // 64-bit Windows - the really exotic calling convention
|
||||
|
||||
#define ABI_PARAM1 RCX
|
||||
|
@ -74,4 +50,3 @@
|
|||
|
||||
#endif // WIN32
|
||||
|
||||
#endif // X86
|
||||
|
|
|
@ -90,22 +90,12 @@ CPUInfo::CPUInfo() {
|
|||
void CPUInfo::Detect()
|
||||
{
|
||||
memset(this, 0, sizeof(*this));
|
||||
#if _M_X86_32
|
||||
Mode64bit = false;
|
||||
#elif _M_X86_64
|
||||
#ifdef _M_X86_64
|
||||
Mode64bit = true;
|
||||
OS64bit = true;
|
||||
#endif
|
||||
num_cores = 1;
|
||||
|
||||
#ifdef _WIN32
|
||||
#if _M_X86_32
|
||||
BOOL f64 = false;
|
||||
IsWow64Process(GetCurrentProcess(), &f64);
|
||||
OS64bit = (f64 == TRUE) ? true : false;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Set obvious defaults, for extra safety
|
||||
if (Mode64bit) {
|
||||
bSSE = true;
|
||||
|
|
|
@ -125,7 +125,6 @@ void XEmitter::WriteSIB(int scale, int index, int base)
|
|||
void OpArg::WriteRex(XEmitter *emit, int opBits, int bits, int customOp) const
|
||||
{
|
||||
if (customOp == -1) customOp = operandReg;
|
||||
#if _M_X86_64
|
||||
u8 op = 0x40;
|
||||
// REX.W (whether operation is a 64-bit operation)
|
||||
if (opBits == 64) op |= 8;
|
||||
|
@ -145,17 +144,6 @@ void OpArg::WriteRex(XEmitter *emit, int opBits, int bits, int customOp) const
|
|||
_dbg_assert_(DYNA_REC, (offsetOrBaseReg & 0x100) == 0);
|
||||
_dbg_assert_(DYNA_REC, (customOp & 0x100) == 0);
|
||||
}
|
||||
#else
|
||||
// Make sure we don't perform a 64-bit operation.
|
||||
_dbg_assert_(DYNA_REC, opBits != 64);
|
||||
// Make sure the operation doesn't access R8-R15 registers.
|
||||
_dbg_assert_(DYNA_REC, (customOp & 8) == 0);
|
||||
_dbg_assert_(DYNA_REC, (indexReg & 8) == 0);
|
||||
_dbg_assert_(DYNA_REC, (offsetOrBaseReg & 8) == 0);
|
||||
// Make sure the operation doesn't access SIL, DIL, BPL, or SPL.
|
||||
_dbg_assert_(DYNA_REC, opBits != 8 || (customOp & 0x10c) != 4);
|
||||
_dbg_assert_(DYNA_REC, scale != SCALE_NONE || bits != 8 || (offsetOrBaseReg & 0x10c) != 4);
|
||||
#endif
|
||||
}
|
||||
|
||||
void OpArg::WriteVex(XEmitter* emit, int size, int packed, Gen::X64Reg regOp1, Gen::X64Reg regOp2) const
|
||||
|
@ -208,7 +196,6 @@ void OpArg::WriteRest(XEmitter *emit, int extraBytes, X64Reg _operandReg,
|
|||
_offsetOrBaseReg = 5;
|
||||
emit->WriteModRM(0, _operandReg, _offsetOrBaseReg);
|
||||
//TODO : add some checks
|
||||
#if _M_X86_64
|
||||
u64 ripAddr = (u64)emit->GetCodePtr() + 4 + extraBytes;
|
||||
s64 distance = (s64)offset - (s64)ripAddr;
|
||||
_assert_msg_(DYNA_REC,
|
||||
|
@ -219,9 +206,6 @@ void OpArg::WriteRest(XEmitter *emit, int extraBytes, X64Reg _operandReg,
|
|||
ripAddr, offset);
|
||||
s32 offs = (s32)distance;
|
||||
emit->Write32((u32)offs);
|
||||
#else
|
||||
emit->Write32((u32)offset);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1267,7 +1251,6 @@ void XEmitter::MOVD_xmm(X64Reg dest, const OpArg &arg) {WriteSSEOp(64, 0x6E, tru
|
|||
void XEmitter::MOVD_xmm(const OpArg &arg, X64Reg src) {WriteSSEOp(64, 0x7E, true, src, arg, 0);}
|
||||
|
||||
void XEmitter::MOVQ_xmm(X64Reg dest, OpArg arg) {
|
||||
#if _M_X86_64
|
||||
// Alternate encoding
|
||||
// This does not display correctly in MSVC's debugger, it thinks it's a MOVD
|
||||
arg.operandReg = dest;
|
||||
|
@ -1276,13 +1259,6 @@ void XEmitter::MOVQ_xmm(X64Reg dest, OpArg arg) {
|
|||
Write8(0x0f);
|
||||
Write8(0x6E);
|
||||
arg.WriteRest(this, 0);
|
||||
#else
|
||||
arg.operandReg = dest;
|
||||
Write8(0xF3);
|
||||
Write8(0x0f);
|
||||
Write8(0x7E);
|
||||
arg.WriteRest(this, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
void XEmitter::MOVQ_xmm(OpArg arg, X64Reg src) {
|
||||
|
@ -1626,8 +1602,6 @@ void XEmitter::RTDSC() { Write8(0x0F); Write8(0x31); }
|
|||
void XEmitter::CallCdeclFunction3(void* fnptr, u32 arg0, u32 arg1, u32 arg2)
|
||||
{
|
||||
using namespace Gen;
|
||||
#if _M_X86_64
|
||||
|
||||
#ifdef _MSC_VER
|
||||
MOV(32, R(RCX), Imm32(arg0));
|
||||
MOV(32, R(RDX), Imm32(arg1));
|
||||
|
@ -1639,26 +1613,11 @@ void XEmitter::CallCdeclFunction3(void* fnptr, u32 arg0, u32 arg1, u32 arg2)
|
|||
MOV(32, R(RDX), Imm32(arg2));
|
||||
CALL(fnptr);
|
||||
#endif
|
||||
|
||||
#else
|
||||
ABI_AlignStack(3 * 4);
|
||||
PUSH(32, Imm32(arg2));
|
||||
PUSH(32, Imm32(arg1));
|
||||
PUSH(32, Imm32(arg0));
|
||||
CALL(fnptr);
|
||||
#ifdef _WIN32
|
||||
// don't inc stack
|
||||
#else
|
||||
ABI_RestoreStack(3 * 4);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
void XEmitter::CallCdeclFunction4(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3)
|
||||
{
|
||||
using namespace Gen;
|
||||
#if _M_X86_64
|
||||
|
||||
#ifdef _MSC_VER
|
||||
MOV(32, R(RCX), Imm32(arg0));
|
||||
MOV(32, R(RDX), Imm32(arg1));
|
||||
|
@ -1672,27 +1631,11 @@ void XEmitter::CallCdeclFunction4(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32
|
|||
MOV(32, R(RCX), Imm32(arg3));
|
||||
CALL(fnptr);
|
||||
#endif
|
||||
|
||||
#else
|
||||
ABI_AlignStack(4 * 4);
|
||||
PUSH(32, Imm32(arg3));
|
||||
PUSH(32, Imm32(arg2));
|
||||
PUSH(32, Imm32(arg1));
|
||||
PUSH(32, Imm32(arg0));
|
||||
CALL(fnptr);
|
||||
#ifdef _WIN32
|
||||
// don't inc stack
|
||||
#else
|
||||
ABI_RestoreStack(4 * 4);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
void XEmitter::CallCdeclFunction5(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
|
||||
{
|
||||
using namespace Gen;
|
||||
#if _M_X86_64
|
||||
|
||||
#ifdef _MSC_VER
|
||||
MOV(32, R(RCX), Imm32(arg0));
|
||||
MOV(32, R(RDX), Imm32(arg1));
|
||||
|
@ -1708,28 +1651,11 @@ void XEmitter::CallCdeclFunction5(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32
|
|||
MOV(32, R(R8), Imm32(arg4));
|
||||
CALL(fnptr);
|
||||
#endif
|
||||
|
||||
#else
|
||||
ABI_AlignStack(5 * 4);
|
||||
PUSH(32, Imm32(arg4));
|
||||
PUSH(32, Imm32(arg3));
|
||||
PUSH(32, Imm32(arg2));
|
||||
PUSH(32, Imm32(arg1));
|
||||
PUSH(32, Imm32(arg0));
|
||||
CALL(fnptr);
|
||||
#ifdef _WIN32
|
||||
// don't inc stack
|
||||
#else
|
||||
ABI_RestoreStack(5 * 4);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
void XEmitter::CallCdeclFunction6(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
|
||||
{
|
||||
using namespace Gen;
|
||||
#if _M_X86_64
|
||||
|
||||
#ifdef _MSC_VER
|
||||
MOV(32, R(RCX), Imm32(arg0));
|
||||
MOV(32, R(RDX), Imm32(arg1));
|
||||
|
@ -1747,26 +1673,8 @@ void XEmitter::CallCdeclFunction6(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32
|
|||
MOV(32, R(R9), Imm32(arg5));
|
||||
CALL(fnptr);
|
||||
#endif
|
||||
|
||||
#else
|
||||
ABI_AlignStack(6 * 4);
|
||||
PUSH(32, Imm32(arg5));
|
||||
PUSH(32, Imm32(arg4));
|
||||
PUSH(32, Imm32(arg3));
|
||||
PUSH(32, Imm32(arg2));
|
||||
PUSH(32, Imm32(arg1));
|
||||
PUSH(32, Imm32(arg0));
|
||||
CALL(fnptr);
|
||||
#ifdef _WIN32
|
||||
// don't inc stack
|
||||
#else
|
||||
ABI_RestoreStack(6 * 4);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
#if _M_X86_64
|
||||
|
||||
// See header
|
||||
void XEmitter::___CallCdeclImport3(void* impptr, u32 arg0, u32 arg1, u32 arg2) {
|
||||
MOV(32, R(RCX), Imm32(arg0));
|
||||
|
@ -1799,6 +1707,4 @@ void XEmitter::___CallCdeclImport6(void* impptr, u32 arg0, u32 arg1, u32 arg2, u
|
|||
CALLptr(M(impptr));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
}
|
||||
|
|
|
@ -702,11 +702,7 @@ public:
|
|||
void ABI_AlignStack(unsigned int frameSize, bool noProlog = false);
|
||||
void ABI_RestoreStack(unsigned int frameSize, bool noProlog = false);
|
||||
|
||||
#if _M_X86_32
|
||||
inline int ABI_GetNumXMMRegs() { return 8; }
|
||||
#else
|
||||
inline int ABI_GetNumXMMRegs() { return 16; }
|
||||
#endif
|
||||
|
||||
// Strange call wrappers.
|
||||
void CallCdeclFunction3(void* fnptr, u32 arg0, u32 arg1, u32 arg2);
|
||||
|
@ -714,17 +710,6 @@ public:
|
|||
void CallCdeclFunction5(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
|
||||
void CallCdeclFunction6(void* fnptr, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5);
|
||||
|
||||
#if _M_X86_32
|
||||
|
||||
#define CallCdeclFunction3_I(a,b,c,d) CallCdeclFunction3((void *)(a), (b), (c), (d))
|
||||
#define CallCdeclFunction4_I(a,b,c,d,e) CallCdeclFunction4((void *)(a), (b), (c), (d), (e))
|
||||
#define CallCdeclFunction5_I(a,b,c,d,e,f) CallCdeclFunction5((void *)(a), (b), (c), (d), (e), (f))
|
||||
#define CallCdeclFunction6_I(a,b,c,d,e,f,g) CallCdeclFunction6((void *)(a), (b), (c), (d), (e), (f), (g))
|
||||
|
||||
#define DECLARE_IMPORT(x)
|
||||
|
||||
#else
|
||||
|
||||
// Comments from VertexLoader.cpp about these horrors:
|
||||
|
||||
// This is a horrible hack that is necessary in 64-bit mode because Opengl32.dll is based way, way above the 32-bit
|
||||
|
@ -743,8 +728,6 @@ public:
|
|||
|
||||
#define DECLARE_IMPORT(x) extern "C" void *__imp_##x
|
||||
|
||||
#endif
|
||||
|
||||
// Utility to generate a call to a std::function object.
|
||||
//
|
||||
// Unfortunately, calling operator() directly is undefined behavior in C++
|
||||
|
|
|
@ -32,30 +32,9 @@ namespace FPURoundMode
|
|||
fesetround(rounding_mode_lut[mode]);
|
||||
}
|
||||
|
||||
void SetPrecisionMode(PrecisionMode mode)
|
||||
void SetPrecisionMode(PrecisionMode /* mode */)
|
||||
{
|
||||
#ifdef _M_X86_32
|
||||
// sets the floating-point lib to 53-bit
|
||||
// PowerPC has a 53bit floating pipeline only
|
||||
// eg: sscanf is very sensitive
|
||||
#ifdef _WIN32
|
||||
_control87(_PC_53, MCW_PC);
|
||||
#else
|
||||
const unsigned short PRECISION_MASK = 3 << 8;
|
||||
const unsigned short precision_table[] = {
|
||||
0 << 8, // 24 bits
|
||||
2 << 8, // 53 bits
|
||||
3 << 8, // 64 bits
|
||||
};
|
||||
unsigned short _mode;
|
||||
asm ("fstcw %0" : "=m" (_mode));
|
||||
_mode = (_mode & ~PRECISION_MASK) | precision_table[mode];
|
||||
asm ("fldcw %0" : : "m" (_mode));
|
||||
#endif
|
||||
#else
|
||||
//x64 doesn't need this - fpu is done with SSE
|
||||
//but still - set any useful sse options here
|
||||
#endif
|
||||
//x64 doesn't need this - fpu is done with SSE
|
||||
}
|
||||
|
||||
void SetSIMDMode(int rounding_mode, bool non_ieee_mode)
|
||||
|
|
|
@ -401,15 +401,9 @@ void DSPEmitter::CompileDispatcher()
|
|||
|
||||
|
||||
// Execute block. Cycles executed returned in EAX.
|
||||
#if _M_X86_32
|
||||
MOVZX(32, 16, ECX, M(&g_dsp.pc));
|
||||
MOV(32, R(EBX), ImmPtr(blocks));
|
||||
JMPptr(MComplex(EBX, ECX, SCALE_4, 0));
|
||||
#else
|
||||
MOVZX(64, 16, ECX, M(&g_dsp.pc));//for clarity, use 64 here.
|
||||
MOVZX(64, 16, ECX, M(&g_dsp.pc));
|
||||
MOV(64, R(RBX), ImmPtr(blocks));
|
||||
JMPptr(MComplex(RBX, RCX, SCALE_8, 0));
|
||||
#endif
|
||||
|
||||
returnDispatcher = GetCodePtr();
|
||||
|
||||
|
|
|
@ -18,7 +18,6 @@ using namespace Gen;
|
|||
// flags out: --10 0100
|
||||
void DSPEmitter::clr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 reg = (opc >> 11) & 0x1;
|
||||
// dsp_set_long_acc(reg, 0);
|
||||
MOV(64, R(RAX), Imm64(0));
|
||||
|
@ -28,9 +27,6 @@ void DSPEmitter::clr(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CLRL $acR.l
|
||||
|
@ -40,7 +36,6 @@ void DSPEmitter::clr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::clrl(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
// s64 acc = dsp_round_long_acc(dsp_get_long_acc(reg));
|
||||
get_long_acc(reg);
|
||||
|
@ -52,9 +47,6 @@ void DSPEmitter::clrl(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -68,7 +60,6 @@ void DSPEmitter::clrl(const UDSPInstruction opc)
|
|||
// flags out: -x-- ----
|
||||
void DSPEmitter::andcf(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
|
@ -93,9 +84,6 @@ void DSPEmitter::andcf(const UDSPInstruction opc)
|
|||
SetJumpTarget(exit);
|
||||
gpr.putReg(DSP_REG_SR);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ANDF $acD.m, #I
|
||||
|
@ -108,7 +96,6 @@ void DSPEmitter::andcf(const UDSPInstruction opc)
|
|||
// flags out: -x-- ----
|
||||
void DSPEmitter::andf(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
|
@ -132,9 +119,6 @@ void DSPEmitter::andf(const UDSPInstruction opc)
|
|||
SetJumpTarget(exit);
|
||||
gpr.putReg(DSP_REG_SR);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -146,7 +130,6 @@ void DSPEmitter::andf(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::tst(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
u8 reg = (opc >> 11) & 0x1;
|
||||
|
@ -155,9 +138,6 @@ void DSPEmitter::tst(const UDSPInstruction opc)
|
|||
// Update_SR_Register64(acc);
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// TSTAXH $axR.h
|
||||
|
@ -167,7 +147,6 @@ void DSPEmitter::tst(const UDSPInstruction opc)
|
|||
// flags out: --x0 xx00
|
||||
void DSPEmitter::tstaxh(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
|
@ -176,9 +155,6 @@ void DSPEmitter::tstaxh(const UDSPInstruction opc)
|
|||
// Update_SR_Register16(val);
|
||||
Update_SR_Register16();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -190,7 +166,6 @@ void DSPEmitter::tstaxh(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::cmp(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
X64Reg tmp1;
|
||||
|
@ -207,9 +182,6 @@ void DSPEmitter::cmp(const UDSPInstruction opc)
|
|||
Update_SR_Register64_Carry2(EAX, tmp1);
|
||||
gpr.putXReg(tmp1);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CMPAR $acS axR.h
|
||||
|
@ -220,7 +192,6 @@ void DSPEmitter::cmp(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::cmpar(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
u8 rreg = ((opc >> 12) & 0x1);
|
||||
|
@ -242,9 +213,6 @@ void DSPEmitter::cmpar(const UDSPInstruction opc)
|
|||
Update_SR_Register64_Carry2(EAX, tmp1);
|
||||
gpr.putXReg(tmp1);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CMPI $amD, #I
|
||||
|
@ -256,7 +224,6 @@ void DSPEmitter::cmpar(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::cmpi(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
|
@ -275,9 +242,6 @@ void DSPEmitter::cmpi(const UDSPInstruction opc)
|
|||
Update_SR_Register64_Carry2(EAX, tmp1);
|
||||
gpr.putXReg(tmp1);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CMPIS $acD, #I
|
||||
|
@ -289,7 +253,6 @@ void DSPEmitter::cmpi(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::cmpis(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
u8 areg = (opc >> 8) & 0x1;
|
||||
|
@ -308,9 +271,6 @@ void DSPEmitter::cmpis(const UDSPInstruction opc)
|
|||
Update_SR_Register64_Carry2(EAX, tmp1);
|
||||
gpr.putXReg(tmp1);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -324,7 +284,6 @@ void DSPEmitter::cmpis(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::xorr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
// u16 accm = g_dsp.r.acm[dreg] ^ g_dsp.r.axh[sreg];
|
||||
|
@ -339,9 +298,6 @@ void DSPEmitter::xorr(const UDSPInstruction opc)
|
|||
get_long_acc(dreg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ANDR $acD.m, $axS.h
|
||||
|
@ -353,7 +309,6 @@ void DSPEmitter::xorr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::andr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
// u16 accm = g_dsp.r.acm[dreg] & g_dsp.r.axh[sreg];
|
||||
|
@ -368,9 +323,6 @@ void DSPEmitter::andr(const UDSPInstruction opc)
|
|||
get_long_acc(dreg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ORR $acD.m, $axS.h
|
||||
|
@ -382,7 +334,6 @@ void DSPEmitter::andr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::orr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
// u16 accm = g_dsp.r.acm[dreg] | g_dsp.r.axh[sreg];
|
||||
|
@ -397,9 +348,6 @@ void DSPEmitter::orr(const UDSPInstruction opc)
|
|||
get_long_acc(dreg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ANDC $acD.m, $ac(1-D).m
|
||||
|
@ -411,7 +359,6 @@ void DSPEmitter::orr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::andc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
// u16 accm = g_dsp.r.acm[dreg] & g_dsp.r.acm[1 - dreg];
|
||||
get_acc_m(dreg, RAX);
|
||||
|
@ -425,9 +372,6 @@ void DSPEmitter::andc(const UDSPInstruction opc)
|
|||
get_long_acc(dreg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ORC $acD.m, $ac(1-D).m
|
||||
|
@ -439,7 +383,6 @@ void DSPEmitter::andc(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::orc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
// u16 accm = g_dsp.r.acm[dreg] | g_dsp.r.acm[1 - dreg];
|
||||
get_acc_m(dreg, RAX);
|
||||
|
@ -453,9 +396,6 @@ void DSPEmitter::orc(const UDSPInstruction opc)
|
|||
get_long_acc(dreg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// XORC $acD.m
|
||||
|
@ -466,7 +406,6 @@ void DSPEmitter::orc(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::xorc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
// u16 accm = g_dsp.r.acm[dreg] ^ g_dsp.r.acm[1 - dreg];
|
||||
get_acc_m(dreg, RAX);
|
||||
|
@ -480,9 +419,6 @@ void DSPEmitter::xorc(const UDSPInstruction opc)
|
|||
get_long_acc(dreg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// NOT $acD.m
|
||||
|
@ -493,7 +429,6 @@ void DSPEmitter::xorc(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::notc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
// u16 accm = g_dsp.r.acm[dreg] ^ 0xffff;
|
||||
get_acc_m(dreg, RAX);
|
||||
|
@ -506,9 +441,6 @@ void DSPEmitter::notc(const UDSPInstruction opc)
|
|||
get_long_acc(dreg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// XORI $acD.m, #I
|
||||
|
@ -520,7 +452,6 @@ void DSPEmitter::notc(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::xori(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
// u16 imm = dsp_fetch_code();
|
||||
u16 imm = dsp_imem_read(compilePC+1);
|
||||
|
@ -534,9 +465,6 @@ void DSPEmitter::xori(const UDSPInstruction opc)
|
|||
get_long_acc(reg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ANDI $acD.m, #I
|
||||
|
@ -547,7 +475,6 @@ void DSPEmitter::xori(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::andi(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
// u16 imm = dsp_fetch_code();
|
||||
u16 imm = dsp_imem_read(compilePC+1);
|
||||
|
@ -561,9 +488,6 @@ void DSPEmitter::andi(const UDSPInstruction opc)
|
|||
get_long_acc(reg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ORI $acD.m, #I
|
||||
|
@ -574,7 +498,6 @@ void DSPEmitter::andi(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::ori(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 reg = (opc >> 8) & 0x1;
|
||||
// u16 imm = dsp_fetch_code();
|
||||
u16 imm = dsp_imem_read(compilePC+1);
|
||||
|
@ -588,9 +511,6 @@ void DSPEmitter::ori(const UDSPInstruction opc)
|
|||
get_long_acc(reg, RCX);
|
||||
Update_SR_Register16_OverS32();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -602,7 +522,6 @@ void DSPEmitter::ori(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::addr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = ((opc >> 9) & 0x3) + DSP_REG_AXL0;
|
||||
|
||||
|
@ -630,9 +549,6 @@ void DSPEmitter::addr(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ADDAX $acD, $axS
|
||||
|
@ -642,7 +558,6 @@ void DSPEmitter::addr(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::addax(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -669,9 +584,6 @@ void DSPEmitter::addax(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ADD $acD, $ac(1-D)
|
||||
|
@ -681,7 +593,6 @@ void DSPEmitter::addax(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::add(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
X64Reg tmp1;
|
||||
|
@ -707,9 +618,6 @@ void DSPEmitter::add(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ADDP $acD
|
||||
|
@ -719,7 +627,6 @@ void DSPEmitter::add(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::addp(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
X64Reg tmp1;
|
||||
|
@ -745,9 +652,6 @@ void DSPEmitter::addp(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ADDAXL $acD, $axS.l
|
||||
|
@ -758,7 +662,6 @@ void DSPEmitter::addp(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::addaxl(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
|
@ -786,9 +689,6 @@ void DSPEmitter::addaxl(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ADDI $amR, #I
|
||||
|
@ -799,7 +699,6 @@ void DSPEmitter::addaxl(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::addi(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 areg = (opc >> 8) & 0x1;
|
||||
X64Reg tmp1;
|
||||
gpr.getFreeXReg(tmp1);
|
||||
|
@ -828,9 +727,6 @@ void DSPEmitter::addi(const UDSPInstruction opc)
|
|||
set_long_acc(areg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ADDIS $acD, #I
|
||||
|
@ -840,7 +736,6 @@ void DSPEmitter::addi(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::addis(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
X64Reg tmp1;
|
||||
|
@ -869,9 +764,6 @@ void DSPEmitter::addis(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// INCM $acsD
|
||||
|
@ -881,7 +773,6 @@ void DSPEmitter::addis(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::incm(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
s64 subtract = 0x10000;
|
||||
X64Reg tmp1;
|
||||
|
@ -906,9 +797,6 @@ void DSPEmitter::incm(const UDSPInstruction opc)
|
|||
set_long_acc(dreg);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// INC $acD
|
||||
|
@ -918,7 +806,6 @@ void DSPEmitter::incm(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::inc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
X64Reg tmp1;
|
||||
gpr.getFreeXReg(tmp1);
|
||||
|
@ -942,9 +829,6 @@ void DSPEmitter::inc(const UDSPInstruction opc)
|
|||
set_long_acc(dreg);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -956,7 +840,6 @@ void DSPEmitter::inc(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::subr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = ((opc >> 9) & 0x3) + DSP_REG_AXL0;
|
||||
|
||||
|
@ -986,9 +869,6 @@ void DSPEmitter::subr(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// SUBAX $acD, $axS
|
||||
|
@ -998,7 +878,6 @@ void DSPEmitter::subr(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::subax(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -1026,9 +905,6 @@ void DSPEmitter::subax(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// SUB $acD, $ac(1-D)
|
||||
|
@ -1038,7 +914,6 @@ void DSPEmitter::subax(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::sub(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
X64Reg tmp1;
|
||||
gpr.getFreeXReg(tmp1);
|
||||
|
@ -1064,9 +939,6 @@ void DSPEmitter::sub(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// SUBP $acD
|
||||
|
@ -1076,7 +948,6 @@ void DSPEmitter::sub(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::subp(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
X64Reg tmp1;
|
||||
gpr.getFreeXReg(tmp1);
|
||||
|
@ -1102,9 +973,6 @@ void DSPEmitter::subp(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// DECM $acsD
|
||||
|
@ -1114,7 +982,6 @@ void DSPEmitter::subp(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::decm(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x01;
|
||||
s64 subtract = 0x10000;
|
||||
X64Reg tmp1;
|
||||
|
@ -1139,9 +1006,6 @@ void DSPEmitter::decm(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// DEC $acD
|
||||
|
@ -1151,7 +1015,6 @@ void DSPEmitter::decm(const UDSPInstruction opc)
|
|||
// flags out: x-xx xxxx
|
||||
void DSPEmitter::dec(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x01;
|
||||
X64Reg tmp1;
|
||||
gpr.getFreeXReg(tmp1);
|
||||
|
@ -1175,9 +1038,6 @@ void DSPEmitter::dec(const UDSPInstruction opc)
|
|||
set_long_acc(dreg);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -1189,7 +1049,6 @@ void DSPEmitter::dec(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::neg(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
// s64 acc = dsp_get_long_acc(dreg);
|
||||
get_long_acc(dreg);
|
||||
|
@ -1202,9 +1061,6 @@ void DSPEmitter::neg(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ABS $acD
|
||||
|
@ -1214,7 +1070,6 @@ void DSPEmitter::neg(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::abs(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 11) & 0x1;
|
||||
|
||||
// s64 acc = dsp_get_long_acc(dreg);
|
||||
|
@ -1230,9 +1085,6 @@ void DSPEmitter::abs(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
//----
|
||||
|
||||
|
@ -1245,7 +1097,6 @@ void DSPEmitter::abs(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::movr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 areg = (opc >> 8) & 0x1;
|
||||
u8 sreg = ((opc >> 9) & 0x3) + DSP_REG_AXL0;
|
||||
|
||||
|
@ -1260,9 +1111,6 @@ void DSPEmitter::movr(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MOVAX $acD, $axS
|
||||
|
@ -1272,7 +1120,6 @@ void DSPEmitter::movr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::movax(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -1285,9 +1132,6 @@ void DSPEmitter::movax(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MOV $acD, $ac(1-D)
|
||||
|
@ -1297,7 +1141,6 @@ void DSPEmitter::movax(const UDSPInstruction opc)
|
|||
// flags out: --x0 xx00
|
||||
void DSPEmitter::mov(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
// u64 acc = dsp_get_long_acc(1 - dreg);
|
||||
get_long_acc(1 - dreg);
|
||||
|
@ -1308,9 +1151,6 @@ void DSPEmitter::mov(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -1322,7 +1162,6 @@ void DSPEmitter::mov(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::lsl16(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 areg = (opc >> 8) & 0x1;
|
||||
// s64 acc = dsp_get_long_acc(areg);
|
||||
get_long_acc(areg);
|
||||
|
@ -1335,9 +1174,6 @@ void DSPEmitter::lsl16(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// LSR16 $acR
|
||||
|
@ -1347,7 +1183,6 @@ void DSPEmitter::lsl16(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::lsr16(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 areg = (opc >> 8) & 0x1;
|
||||
|
||||
// u64 acc = dsp_get_long_acc(areg);
|
||||
|
@ -1363,9 +1198,6 @@ void DSPEmitter::lsr16(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ASR16 $acR
|
||||
|
@ -1375,7 +1207,6 @@ void DSPEmitter::lsr16(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::asr16(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 areg = (opc >> 11) & 0x1;
|
||||
|
||||
// s64 acc = dsp_get_long_acc(areg);
|
||||
|
@ -1389,9 +1220,6 @@ void DSPEmitter::asr16(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// LSL $acR, #I
|
||||
|
@ -1401,7 +1229,6 @@ void DSPEmitter::asr16(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::lsl(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x01;
|
||||
u16 shift = opc & 0x3f;
|
||||
// u64 acc = dsp_get_long_acc(rreg);
|
||||
|
@ -1417,9 +1244,6 @@ void DSPEmitter::lsl(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// LSR $acR, #I
|
||||
|
@ -1430,7 +1254,6 @@ void DSPEmitter::lsl(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::lsr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x01;
|
||||
u16 shift;
|
||||
// u64 acc = dsp_get_long_acc(rreg);
|
||||
|
@ -1456,9 +1279,6 @@ void DSPEmitter::lsr(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ASL $acR, #I
|
||||
|
@ -1468,7 +1288,6 @@ void DSPEmitter::lsr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::asl(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x01;
|
||||
u16 shift = opc & 0x3f;
|
||||
// u64 acc = dsp_get_long_acc(rreg);
|
||||
|
@ -1482,9 +1301,6 @@ void DSPEmitter::asl(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ASR $acR, #I
|
||||
|
@ -1495,7 +1311,6 @@ void DSPEmitter::asl(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::asr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x01;
|
||||
u16 shift;
|
||||
|
||||
|
@ -1517,9 +1332,6 @@ void DSPEmitter::asr(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// LSRN (fixed parameters)
|
||||
|
@ -1530,7 +1342,6 @@ void DSPEmitter::asr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::lsrn(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
// s16 shift;
|
||||
// u16 accm = (u16)dsp_get_acc_m(1);
|
||||
get_acc_m(1);
|
||||
|
@ -1580,9 +1391,6 @@ void DSPEmitter::lsrn(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64(RDX);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ASRN (fixed parameters)
|
||||
|
@ -1593,7 +1401,6 @@ void DSPEmitter::lsrn(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::asrn(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
// s16 shift;
|
||||
// u16 accm = (u16)dsp_get_acc_m(1);
|
||||
get_acc_m(1);
|
||||
|
@ -1638,9 +1445,6 @@ void DSPEmitter::asrn(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64(RDX);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// LSRNRX $acD, $axS.h
|
||||
|
@ -1651,7 +1455,6 @@ void DSPEmitter::asrn(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::lsrnrx(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -1702,9 +1505,6 @@ void DSPEmitter::lsrnrx(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64(RDX);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ASRNRX $acD, $axS.h
|
||||
|
@ -1715,7 +1515,6 @@ void DSPEmitter::lsrnrx(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::asrnrx(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -1763,9 +1562,6 @@ void DSPEmitter::asrnrx(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64(RDX);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// LSRNR $acD
|
||||
|
@ -1776,7 +1572,6 @@ void DSPEmitter::asrnrx(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::lsrnr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
// s16 shift;
|
||||
|
@ -1825,9 +1620,6 @@ void DSPEmitter::lsrnr(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64(RDX);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ASRNR $acD
|
||||
|
@ -1838,7 +1630,6 @@ void DSPEmitter::lsrnr(const UDSPInstruction opc)
|
|||
// flags out: --xx xx00
|
||||
void DSPEmitter::asrnr(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
// s16 shift;
|
||||
|
@ -1884,9 +1675,6 @@ void DSPEmitter::asrnr(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64(RDX);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -13,7 +13,6 @@ using namespace Gen;
|
|||
// Clobbers RDX
|
||||
void DSPEmitter::Update_SR_Register(Gen::X64Reg val)
|
||||
{
|
||||
#if _M_X86_64
|
||||
OpArg sr_reg;
|
||||
gpr.getReg(DSP_REG_SR,sr_reg);
|
||||
// // 0x04
|
||||
|
@ -50,21 +49,18 @@ void DSPEmitter::Update_SR_Register(Gen::X64Reg val)
|
|||
OR(16, sr_reg, Imm16(SR_TOP2BITS));
|
||||
SetJumpTarget(cC);
|
||||
gpr.putReg(DSP_REG_SR);
|
||||
#endif
|
||||
}
|
||||
|
||||
// In: RAX: s64 _Value
|
||||
// Clobbers RDX
|
||||
void DSPEmitter::Update_SR_Register64(Gen::X64Reg val)
|
||||
{
|
||||
#if _M_X86_64
|
||||
// g_dsp.r[DSP_REG_SR] &= ~SR_CMP_MASK;
|
||||
OpArg sr_reg;
|
||||
gpr.getReg(DSP_REG_SR,sr_reg);
|
||||
AND(16, sr_reg, Imm16(~SR_CMP_MASK));
|
||||
gpr.putReg(DSP_REG_SR);
|
||||
Update_SR_Register(val);
|
||||
#endif
|
||||
}
|
||||
|
||||
// In: (val): s64 _Value
|
||||
|
@ -72,7 +68,6 @@ void DSPEmitter::Update_SR_Register64(Gen::X64Reg val)
|
|||
// Clobbers RDX
|
||||
void DSPEmitter::Update_SR_Register64_Carry(X64Reg val, X64Reg carry_ovfl)
|
||||
{
|
||||
#if _M_X86_64
|
||||
OpArg sr_reg;
|
||||
gpr.getReg(DSP_REG_SR,sr_reg);
|
||||
// g_dsp.r[DSP_REG_SR] &= ~SR_CMP_MASK;
|
||||
|
@ -101,7 +96,6 @@ void DSPEmitter::Update_SR_Register64_Carry(X64Reg val, X64Reg carry_ovfl)
|
|||
|
||||
gpr.putReg(DSP_REG_SR);
|
||||
Update_SR_Register(val);
|
||||
#endif
|
||||
}
|
||||
|
||||
// In: (val): s64 _Value
|
||||
|
@ -109,7 +103,6 @@ void DSPEmitter::Update_SR_Register64_Carry(X64Reg val, X64Reg carry_ovfl)
|
|||
// Clobbers RDX
|
||||
void DSPEmitter::Update_SR_Register64_Carry2(X64Reg val, X64Reg carry_ovfl)
|
||||
{
|
||||
#if _M_X86_64
|
||||
OpArg sr_reg;
|
||||
gpr.getReg(DSP_REG_SR,sr_reg);
|
||||
// g_dsp.r[DSP_REG_SR] &= ~SR_CMP_MASK;
|
||||
|
@ -138,7 +131,6 @@ void DSPEmitter::Update_SR_Register64_Carry2(X64Reg val, X64Reg carry_ovfl)
|
|||
gpr.putReg(DSP_REG_SR);
|
||||
|
||||
Update_SR_Register();
|
||||
#endif
|
||||
}
|
||||
|
||||
//void DSPEmitter::Update_SR_Register16(s16 _Value, bool carry, bool overflow, bool overS32)
|
||||
|
@ -155,7 +147,6 @@ void DSPEmitter::Update_SR_Register64_Carry2(X64Reg val, X64Reg carry_ovfl)
|
|||
// Clobbers RDX
|
||||
void DSPEmitter::Update_SR_Register16(X64Reg val)
|
||||
{
|
||||
#if _M_X86_64
|
||||
OpArg sr_reg;
|
||||
gpr.getReg(DSP_REG_SR,sr_reg);
|
||||
AND(16, sr_reg, Imm16(~SR_CMP_MASK));
|
||||
|
@ -190,14 +181,12 @@ void DSPEmitter::Update_SR_Register16(X64Reg val)
|
|||
SetJumpTarget(notThree);
|
||||
SetJumpTarget(cC);
|
||||
gpr.putReg(DSP_REG_SR);
|
||||
#endif
|
||||
}
|
||||
|
||||
// In: RAX: s64 _Value
|
||||
// Clobbers RDX
|
||||
void DSPEmitter::Update_SR_Register16_OverS32(Gen::X64Reg val)
|
||||
{
|
||||
#if _M_X86_64
|
||||
OpArg sr_reg;
|
||||
gpr.getReg(DSP_REG_SR,sr_reg);
|
||||
AND(16, sr_reg, Imm16(~SR_CMP_MASK));
|
||||
|
@ -215,7 +204,6 @@ void DSPEmitter::Update_SR_Register16_OverS32(Gen::X64Reg val)
|
|||
// if ((((u16)_Value >> 14) == 0) || (((u16)_Value >> 14) == 3))
|
||||
//AND(32, R(val), Imm32(0xc0000000));
|
||||
Update_SR_Register16(val);
|
||||
#endif
|
||||
}
|
||||
|
||||
//void DSPEmitter::Update_SR_LZ(bool value) {
|
||||
|
|
|
@ -17,7 +17,6 @@ using namespace Gen;
|
|||
// In: RCX = s16 a, RAX = s16 b
|
||||
void DSPEmitter::multiply()
|
||||
{
|
||||
#if _M_X86_64
|
||||
// prod = (s16)a * (s16)b; //signed
|
||||
IMUL(64, R(ECX));
|
||||
|
||||
|
@ -32,35 +31,30 @@ void DSPEmitter::multiply()
SetJumpTarget(noMult2);
gpr.putReg(DSP_REG_SR, false);
// return prod;
#endif
}

// Returns s64 in RAX
// Clobbers RDX
void DSPEmitter::multiply_add()
{
#if _M_X86_64
// s64 prod = dsp_get_long_prod() + dsp_get_multiply_prod(a, b, sign);
multiply();
MOV(64, R(RDX), R(RAX));
get_long_prod();
ADD(64, R(RAX), R(RDX));
// return prod;
#endif
}

// Returns s64 in RAX
// Clobbers RDX
void DSPEmitter::multiply_sub()
{
#if _M_X86_64
// s64 prod = dsp_get_long_prod() - dsp_get_multiply_prod(a, b, sign);
multiply();
MOV(64, R(RDX), R(RAX));
get_long_prod();
SUB(64, R(RAX), R(RDX));
// return prod;
#endif
}
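For readers skimming the emitter calls, this is roughly the arithmetic multiply_add() and multiply_sub() produce at runtime. A minimal host-side sketch with made-up names (not Dolphin's interpreter API), ignoring the SR-dependent product adjustment guarded by the noMult2 branch above:

#include <cstdint>

// prod is the current product register value, sign-extended to 64 bits.
int64_t multiply_signed(int16_t a, int16_t b)
{
	return (int64_t)a * (int64_t)b;          // IMUL(64, R(ECX)) with the operands in RAX/RCX
}

int64_t multiply_add(int64_t prod, int16_t a, int16_t b)
{
	return prod + multiply_signed(a, b);     // get_long_prod(); ADD(64, R(RAX), R(RDX))
}

int64_t multiply_sub(int64_t prod, int16_t a, int16_t b)
{
	return prod - multiply_signed(a, b);     // get_long_prod(); SUB(64, R(RAX), R(RDX))
}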
|
||||
|
||||
// Only MULX family instructions have unsigned/mixed support.
|
||||
|
@ -69,7 +63,6 @@ void DSPEmitter::multiply_sub()
|
|||
// Returns s64 in RAX
|
||||
void DSPEmitter::multiply_mulx(u8 axh0, u8 axh1)
|
||||
{
|
||||
#if _M_X86_64
|
||||
// s64 result;
|
||||
|
||||
// if ((axh0==0) && (axh1==0))
|
||||
|
@ -141,7 +134,6 @@ void DSPEmitter::multiply_mulx(u8 axh0, u8 axh1)
|
|||
SetJumpTarget(noMult2);
|
||||
gpr.putReg(DSP_REG_SR, false);
|
||||
// return prod;
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -169,7 +161,6 @@ void DSPEmitter::clrp(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::tstprod(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
if (FlagsNeeded())
|
||||
{
|
||||
// s64 prod = dsp_get_long_prod();
|
||||
|
@ -177,9 +168,6 @@ void DSPEmitter::tstprod(const UDSPInstruction opc)
|
|||
// Update_SR_Register64(prod);
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -191,7 +179,6 @@ void DSPEmitter::tstprod(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::movp(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
// s64 acc = dsp_get_long_prod();
|
||||
|
@ -203,9 +190,6 @@ void DSPEmitter::movp(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MOVNP $acD
|
||||
|
@ -216,7 +200,6 @@ void DSPEmitter::movp(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::movnp(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
|
||||
// s64 acc = -dsp_get_long_prod();
|
||||
|
@ -229,9 +212,6 @@ void DSPEmitter::movnp(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MOVPZ $acD
|
||||
|
@ -242,7 +222,6 @@ void DSPEmitter::movnp(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::movpz(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x01;
|
||||
|
||||
// s64 acc = dsp_get_long_prod_round_prodl();
|
||||
|
@ -254,9 +233,6 @@ void DSPEmitter::movpz(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ADDPAXZ $acD, $axS
|
||||
|
@ -267,7 +243,6 @@ void DSPEmitter::movpz(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::addpaxz(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 dreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -299,9 +274,6 @@ void DSPEmitter::addpaxz(const UDSPInstruction opc)
|
|||
set_long_acc(dreg, RAX);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -311,16 +283,12 @@ void DSPEmitter::addpaxz(const UDSPInstruction opc)
|
|||
// Multiply $ax0.h by $ax0.h
|
||||
void DSPEmitter::mulaxh(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
// s64 prod = dsp_multiply(dsp_get_ax_h(0), dsp_get_ax_h(0));
|
||||
dsp_op_read_reg(DSP_REG_AXH0, RCX, SIGN);
|
||||
MOV(64, R(RAX), R(RCX));
|
||||
multiply();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -331,7 +299,6 @@ void DSPEmitter::mulaxh(const UDSPInstruction opc)
|
|||
// $axS.h of secondary accumulator $axS (treat them both as signed).
|
||||
void DSPEmitter::mul(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 sreg = (opc >> 11) & 0x1;
|
||||
|
||||
// u16 axl = dsp_get_ax_l(sreg);
|
||||
|
@ -342,9 +309,6 @@ void DSPEmitter::mul(const UDSPInstruction opc)
|
|||
multiply();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULAC $axS.l, $axS.h, $acR
|
||||
|
@ -356,7 +320,6 @@ void DSPEmitter::mul(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulac(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 11) & 0x1;
|
||||
|
||||
|
@ -382,9 +345,6 @@ void DSPEmitter::mulac(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULMV $axS.l, $axS.h, $acR
|
||||
|
@ -396,7 +356,6 @@ void DSPEmitter::mulac(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulmv(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
|
||||
// s64 acc = dsp_get_long_prod();
|
||||
|
@ -411,9 +370,6 @@ void DSPEmitter::mulmv(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULMVZ $axS.l, $axS.h, $acR
|
||||
|
@ -426,7 +382,6 @@ void DSPEmitter::mulmv(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulmvz(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
|
||||
// s64 acc = dsp_get_long_prod_round_prodl();
|
||||
|
@ -439,9 +394,6 @@ void DSPEmitter::mulmvz(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64(RDX);
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -452,7 +404,6 @@ void DSPEmitter::mulmvz(const UDSPInstruction opc)
|
|||
// Part is selected by S and T bits. Zero selects low part, one selects high part.
|
||||
void DSPEmitter::mulx(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 treg = ((opc >> 11) & 0x1);
|
||||
u8 sreg = ((opc >> 12) & 0x1);
|
||||
|
||||
|
@ -464,9 +415,6 @@ void DSPEmitter::mulx(const UDSPInstruction opc)
|
|||
multiply_mulx(sreg, treg);
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULXAC $ax0.S, $ax1.T, $acR
|
||||
|
@ -478,7 +426,6 @@ void DSPEmitter::mulx(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulxac(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
u8 treg = (opc >> 11) & 0x1;
|
||||
u8 sreg = (opc >> 12) & 0x1;
|
||||
|
@ -506,9 +453,6 @@ void DSPEmitter::mulxac(const UDSPInstruction opc)
|
|||
Update_SR_Register64(tmp1);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULXMV $ax0.S, $ax1.T, $acR
|
||||
|
@ -520,7 +464,6 @@ void DSPEmitter::mulxac(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulxmv(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = ((opc >> 8) & 0x1);
|
||||
u8 treg = (opc >> 11) & 0x1;
|
||||
u8 sreg = (opc >> 12) & 0x1;
|
||||
|
@ -546,9 +489,6 @@ void DSPEmitter::mulxmv(const UDSPInstruction opc)
|
|||
Update_SR_Register64(tmp1);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULXMV $ax0.S, $ax1.T, $acR
|
||||
|
@ -561,7 +501,6 @@ void DSPEmitter::mulxmv(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulxmvz(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
u8 treg = (opc >> 11) & 0x1;
|
||||
u8 sreg = (opc >> 12) & 0x1;
|
||||
|
@ -587,9 +526,6 @@ void DSPEmitter::mulxmvz(const UDSPInstruction opc)
|
|||
Update_SR_Register64(tmp1);
|
||||
}
|
||||
gpr.putXReg(tmp1);
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -600,7 +536,6 @@ void DSPEmitter::mulxmvz(const UDSPInstruction opc)
|
|||
// secondary accumulator $axS (treat them both as signed).
|
||||
void DSPEmitter::mulc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 treg = (opc >> 11) & 0x1;
|
||||
u8 sreg = (opc >> 12) & 0x1;
|
||||
|
||||
|
@ -612,9 +547,6 @@ void DSPEmitter::mulc(const UDSPInstruction opc)
|
|||
multiply();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULCAC $acS.m, $axT.h, $acR
|
||||
|
@ -626,7 +558,6 @@ void DSPEmitter::mulc(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulcac(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
u8 treg = (opc >> 11) & 0x1;
|
||||
u8 sreg = (opc >> 12) & 0x1;
|
||||
|
@ -653,9 +584,6 @@ void DSPEmitter::mulcac(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULCMV $acS.m, $axT.h, $acR
|
||||
|
@ -668,7 +596,6 @@ void DSPEmitter::mulcac(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulcmv(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
u8 treg = (opc >> 11) & 0x1;
|
||||
u8 sreg = (opc >> 12) & 0x1;
|
||||
|
@ -692,9 +619,6 @@ void DSPEmitter::mulcmv(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MULCMVZ $acS.m, $axT.h, $acR
|
||||
|
@ -708,7 +632,6 @@ void DSPEmitter::mulcmv(const UDSPInstruction opc)
|
|||
// flags out: --xx xx0x
|
||||
void DSPEmitter::mulcmvz(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 rreg = (opc >> 8) & 0x1;
|
||||
u8 treg = (opc >> 11) & 0x1;
|
||||
u8 sreg = (opc >> 12) & 0x1;
|
||||
|
@ -732,9 +655,6 @@ void DSPEmitter::mulcmvz(const UDSPInstruction opc)
|
|||
{
|
||||
Update_SR_Register64();
|
||||
}
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
//----
|
||||
|
@ -746,7 +666,6 @@ void DSPEmitter::mulcmvz(const UDSPInstruction opc)
|
|||
// signed) and add result to product register.
|
||||
void DSPEmitter::maddx(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 treg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -758,9 +677,6 @@ void DSPEmitter::maddx(const UDSPInstruction opc)
|
|||
multiply_add();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MSUBX $(0x18+S*2), $(0x19+T*2)
|
||||
|
@ -770,7 +686,6 @@ void DSPEmitter::maddx(const UDSPInstruction opc)
|
|||
// signed) and subtract result from product register.
|
||||
void DSPEmitter::msubx(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 treg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -782,9 +697,6 @@ void DSPEmitter::msubx(const UDSPInstruction opc)
|
|||
multiply_sub();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MADDC $acS.m, $axT.h
|
||||
|
@ -794,7 +706,6 @@ void DSPEmitter::msubx(const UDSPInstruction opc)
|
|||
// register.
|
||||
void DSPEmitter::maddc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 treg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -806,9 +717,6 @@ void DSPEmitter::maddc(const UDSPInstruction opc)
|
|||
multiply_add();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MSUBC $acS.m, $axT.h
|
||||
|
@ -818,7 +726,6 @@ void DSPEmitter::maddc(const UDSPInstruction opc)
|
|||
// product register.
|
||||
void DSPEmitter::msubc(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 treg = (opc >> 8) & 0x1;
|
||||
u8 sreg = (opc >> 9) & 0x1;
|
||||
|
||||
|
@ -830,9 +737,6 @@ void DSPEmitter::msubc(const UDSPInstruction opc)
|
|||
multiply_sub();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MADD $axS.l, $axS.h
|
||||
|
@ -842,7 +746,6 @@ void DSPEmitter::msubc(const UDSPInstruction opc)
|
|||
// result to product register.
|
||||
void DSPEmitter::madd(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 sreg = (opc >> 8) & 0x1;
|
||||
|
||||
// u16 axl = dsp_get_ax_l(sreg);
|
||||
|
@ -853,9 +756,6 @@ void DSPEmitter::madd(const UDSPInstruction opc)
|
|||
multiply_add();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
||||
// MSUB $axS.l, $axS.h
|
||||
|
@ -865,7 +765,6 @@ void DSPEmitter::madd(const UDSPInstruction opc)
|
|||
// subtract result from product register.
|
||||
void DSPEmitter::msub(const UDSPInstruction opc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
u8 sreg = (opc >> 8) & 0x1;
|
||||
|
||||
// u16 axl = dsp_get_ax_l(sreg);
|
||||
|
@ -876,7 +775,4 @@ void DSPEmitter::msub(const UDSPInstruction opc)
|
|||
multiply_sub();
|
||||
// dsp_set_long_prod(prod);
|
||||
set_long_prod();
|
||||
#else
|
||||
Default(opc);
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -62,13 +62,11 @@ static void *reg_ptr(int reg)
|
|||
case DSP_REG_AX0_32:
|
||||
case DSP_REG_AX1_32:
|
||||
return &g_dsp.r.ax[reg - DSP_REG_AX0_32].val;
|
||||
#if _M_X86_64
|
||||
case DSP_REG_ACC0_64:
|
||||
case DSP_REG_ACC1_64:
|
||||
return &g_dsp.r.ac[reg - DSP_REG_ACC0_64].val;
|
||||
case DSP_REG_PROD_64:
|
||||
return &g_dsp.r.prod.val;
|
||||
#endif
|
||||
default:
|
||||
_assert_msg_(DSPLLE, 0, "cannot happen");
|
||||
return nullptr;
|
||||
|
@ -101,7 +99,6 @@ DSPJitRegCache::DSPJitRegCache(DSPEmitter &_emitter)
|
|||
xregs[RSI].guest_reg = DSP_REG_NONE;
|
||||
xregs[RDI].guest_reg = DSP_REG_NONE;
|
||||
|
||||
#if _M_X86_64
|
||||
#ifdef STATIC_REG_ACCS
|
||||
xregs[R8].guest_reg = DSP_REG_STATIC; //acc0
|
||||
xregs[R9].guest_reg = DSP_REG_STATIC; //acc1
|
||||
|
@ -115,7 +112,6 @@ DSPJitRegCache::DSPJitRegCache(DSPEmitter &_emitter)
|
|||
xregs[R13].guest_reg = DSP_REG_NONE;
|
||||
xregs[R14].guest_reg = DSP_REG_NONE;
|
||||
xregs[R15].guest_reg = DSP_REG_NONE;
|
||||
#endif
|
||||
|
||||
for (unsigned int i = 0; i <= DSP_REG_MAX_MEM_BACKED; i++)
|
||||
{
|
||||
|
@ -135,7 +131,6 @@ DSPJitRegCache::DSPJitRegCache(DSPEmitter &_emitter)
|
|||
regs[i].size = 2;
|
||||
}
|
||||
//special composite registers
|
||||
#if _M_X86_64
|
||||
#ifdef STATIC_REG_ACCS
|
||||
regs[DSP_REG_ACC0_64].host_reg = R8;
|
||||
regs[DSP_REG_ACC1_64].host_reg = R9;
|
||||
|
@ -160,7 +155,6 @@ DSPJitRegCache::DSPJitRegCache(DSPEmitter &_emitter)
|
|||
regs[DSP_REG_PRODM].shift = 16;
|
||||
regs[DSP_REG_PRODH].shift = 32;
|
||||
regs[DSP_REG_PRODM2].shift = 48;
|
||||
#endif
|
||||
|
||||
for (unsigned int i = 0; i < 2; i++)
|
||||
{
|
||||
|
@ -375,7 +369,6 @@ void DSPJitRegCache::flushRegs()
|
|||
_assert_msg_(DSPLLE,
|
||||
xregs[RDI].guest_reg == DSP_REG_NONE,
|
||||
"wrong xreg state for %d", RDI);
|
||||
#if _M_X86_64
|
||||
#ifdef STATIC_REG_ACCS
|
||||
_assert_msg_(DSPLLE,
|
||||
xregs[R8].guest_reg == DSP_REG_STATIC,
|
||||
|
@ -409,7 +402,6 @@ void DSPJitRegCache::flushRegs()
|
|||
_assert_msg_(DSPLLE,
|
||||
xregs[R15].guest_reg == DSP_REG_NONE,
|
||||
"wrong xreg state for %d", R15);
|
||||
#endif
|
||||
|
||||
use_ctr = 0;
|
||||
}
|
||||
|
@ -428,11 +420,7 @@ void DSPJitRegCache::loadRegs(bool emit)
|
|||
|
||||
if (emit)
|
||||
{
|
||||
#if _M_X86_64
|
||||
emitter.MOV(64, M(&ebp_store), R(RBP));
|
||||
#else
|
||||
emitter.MOV(32, M(&ebp_store), R(EBP));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -455,11 +443,7 @@ void DSPJitRegCache::saveRegs()
|
|||
"register %x is still a simple reg", i);
|
||||
}
|
||||
|
||||
#if _M_X86_64
|
||||
emitter.MOV(64, R(RBP), M(&ebp_store));
|
||||
#else
|
||||
emitter.MOV(32, R(EBP), M(&ebp_store));
|
||||
#endif
|
||||
}
|
||||
|
||||
void DSPJitRegCache::pushRegs()
|
||||
|
@ -482,17 +466,10 @@ void DSPJitRegCache::pushRegs()
}

//hardcoding alignment to 16 bytes
#if _M_X86_64
if (push_count & 1)
{
emitter.SUB(64,R(RSP),Imm32(8));
}
#else
if (push_count & 3)
{
emitter.SUB(32,R(ESP),Imm32(16 - 4 * (push_count & 3)));
}
#endif
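The x86-64 branch kept here implements the comment above: calls expect RSP 16-byte aligned, each PUSH moves RSP by 8, so an odd push count needs one extra 8-byte slot. A small sketch of that rule (assuming RSP was 16-byte aligned before the pushes):

#include <cstddef>

size_t alignment_padding_x64(size_t push_count)
{
	// Each push is 8 bytes; an odd count leaves RSP only 8-byte aligned,
	// so SUB(64, R(RSP), Imm32(8)) restores 16-byte alignment.
	return (push_count & 1) ? 8 : 0;
}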
|
||||
|
||||
for (unsigned int i = 0; i < NUMXREGS; i++)
|
||||
{
|
||||
|
@ -519,19 +496,11 @@ void DSPJitRegCache::pushRegs()
|
|||
"register %x is still used", i);
|
||||
}
|
||||
|
||||
#if _M_X86_64
|
||||
emitter.MOV(64, R(RBP), M(&ebp_store));
|
||||
#else
|
||||
emitter.MOV(32, R(EBP), M(&ebp_store));
|
||||
#endif
|
||||
}
|
||||
|
||||
void DSPJitRegCache::popRegs() {
|
||||
#if _M_X86_64
|
||||
emitter.MOV(64, M(&ebp_store), R(RBP));
|
||||
#else
|
||||
emitter.MOV(32, M(&ebp_store), R(EBP));
|
||||
#endif
|
||||
int push_count = 0;
|
||||
for (X64CachedReg& xreg : xregs)
|
||||
{
|
||||
|
@ -552,17 +521,10 @@ void DSPJitRegCache::popRegs() {
|
|||
}
|
||||
|
||||
//hardcoding alignment to 16 bytes
|
||||
#if _M_X86_64
|
||||
if (push_count & 1)
|
||||
{
|
||||
emitter.ADD(64,R(RSP),Imm32(8));
|
||||
}
|
||||
#else
|
||||
if (push_count & 3)
|
||||
{
|
||||
emitter.ADD(32,R(ESP),Imm32(16 - 4 * (push_count & 3)));
|
||||
}
|
||||
#endif
|
||||
|
||||
for (unsigned int i = 0; i <= DSP_REG_MAX_MEM_BACKED; i++)
|
||||
{
|
||||
|
@ -589,11 +551,7 @@ X64Reg DSPJitRegCache::makeABICallSafe(X64Reg reg)
|
|||
emitter.INT3();
|
||||
}
|
||||
xregs[RBP].guest_reg = rbp_guest;
|
||||
#if _M_X86_64
|
||||
emitter.MOV(64,R(safe),R(reg));
|
||||
#else
|
||||
emitter.MOV(32,R(safe),R(reg));
|
||||
#endif
|
||||
return safe;
|
||||
}
|
||||
|
||||
|
@ -626,11 +584,9 @@ void DSPJitRegCache::movToHostReg(size_t reg, X64Reg host_reg, bool load)
|
|||
case 4:
|
||||
emitter.MOV(32, R(host_reg), regs[reg].loc);
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case 8:
|
||||
emitter.MOV(64, R(host_reg), regs[reg].loc);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
_assert_msg_(DSPLLE, 0, "unsupported memory size");
|
||||
break;
|
||||
|
@ -698,11 +654,9 @@ void DSPJitRegCache::rotateHostReg(size_t reg, int shift, bool emit)
|
|||
case 4:
|
||||
emitter.ROR(32, regs[reg].loc, Imm8(shift - regs[reg].shift));
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case 8:
|
||||
emitter.ROR(64, regs[reg].loc, Imm8(shift - regs[reg].shift));
|
||||
break;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
else if (shift < regs[reg].shift && emit)
|
||||
|
@ -715,11 +669,9 @@ void DSPJitRegCache::rotateHostReg(size_t reg, int shift, bool emit)
|
|||
case 4:
|
||||
emitter.ROL(32, regs[reg].loc, Imm8(regs[reg].shift - shift));
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case 8:
|
||||
emitter.ROL(64, regs[reg].loc, Imm8(regs[reg].shift - shift));
|
||||
break;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
regs[reg].shift = shift;
|
||||
|
@ -770,11 +722,9 @@ void DSPJitRegCache::movToMemory(size_t reg)
|
|||
case 4:
|
||||
emitter.MOV(32, tmp, regs[reg].loc);
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case 8:
|
||||
emitter.MOV(64, tmp, regs[reg].loc);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
_assert_msg_(DSPLLE, 0, "unsupported memory size");
|
||||
break;
|
||||
|
@ -837,7 +787,6 @@ void DSPJitRegCache::getReg(int reg, OpArg &oparg, bool load)
|
|||
//do some register specific fixup
|
||||
switch (reg)
|
||||
{
|
||||
#if _M_X86_64
|
||||
case DSP_REG_ACC0_64:
|
||||
case DSP_REG_ACC1_64:
|
||||
if (load)
|
||||
|
@ -848,7 +797,6 @@ void DSPJitRegCache::getReg(int reg, OpArg &oparg, bool load)
|
|||
emitter.SAR(64, oparg, Imm8(64-40));
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -876,22 +824,7 @@ void DSPJitRegCache::putReg(int reg, bool dirty)
// (if at all)

// sign extend from the bottom 8 bits.
#if _M_X86_32
// cannot use movsx with SPL, BPL, SIL or DIL
// on 32 bit
if (oparg.GetSimpleReg() == RSP ||
oparg.GetSimpleReg() == RBP ||
oparg.GetSimpleReg() == RSI ||
oparg.GetSimpleReg() == RDI)
{
emitter.SHL(16,oparg,Imm8(8));
emitter.SAR(16,oparg,Imm8(8));
}
else
#endif
{
emitter.MOVSX(16, 8, oparg.GetSimpleReg(), oparg);
}
emitter.MOVSX(16, 8, oparg.GetSimpleReg(), oparg);
}
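The removed SHL/SAR fallback and the surviving MOVSX call are two ways of sign-extending bit 7 of a 16-bit register; the fallback existed only because 32-bit x86 cannot encode MOVSX from SPL/BPL/SIL/DIL. A sketch of the equivalence, assuming two's complement:

#include <cstdint>

int16_t sign_extend_low8_shift(int16_t v)
{
	return (int16_t)((int16_t)(v << 8) >> 8);   // SHL(16, ..., 8) then SAR(16, ..., 8)
}

int16_t sign_extend_low8_movsx(int16_t v)
{
	return (int16_t)(int8_t)v;                  // MOVSX(16, 8, reg, reg)
}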
|
||||
else if (oparg.IsImm())
|
||||
{
|
||||
|
@ -910,7 +843,6 @@ void DSPJitRegCache::putReg(int reg, bool dirty)
|
|||
}
|
||||
}
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case DSP_REG_ACC0_64:
|
||||
case DSP_REG_ACC1_64:
|
||||
if (dirty)
|
||||
|
@ -919,7 +851,6 @@ void DSPJitRegCache::putReg(int reg, bool dirty)
|
|||
emitter.SAR(64, oparg, Imm8(64-40));
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -944,28 +875,18 @@ void DSPJitRegCache::readReg(int sreg, X64Reg host_dreg, DSPJitSignExtend extend
|
|||
case 2:
|
||||
switch (extend)
|
||||
{
|
||||
#if _M_X86_64
|
||||
case SIGN:
|
||||
emitter.MOVSX(64, 16, host_dreg, reg);
|
||||
break;
|
||||
case ZERO:
|
||||
emitter.MOVZX(64, 16, host_dreg, reg);
|
||||
break;
|
||||
#else
|
||||
case SIGN:
|
||||
emitter.MOVSX(32, 16, host_dreg, reg);
|
||||
break;
|
||||
case ZERO:
|
||||
emitter.MOVZX(32, 16, host_dreg, reg);
|
||||
break;
|
||||
#endif
|
||||
case NONE:
|
||||
emitter.MOV(16, R(host_dreg), reg);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 4:
|
||||
#if _M_X86_64
|
||||
switch (extend)
|
||||
{
|
||||
case SIGN:
|
||||
|
@ -978,15 +899,10 @@ void DSPJitRegCache::readReg(int sreg, X64Reg host_dreg, DSPJitSignExtend extend
|
|||
emitter.MOV(32, R(host_dreg), reg);
|
||||
break;
|
||||
}
|
||||
#else
|
||||
emitter.MOV(32, R(host_dreg), reg);
|
||||
#endif
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case 8:
|
||||
emitter.MOV(64, R(host_dreg), reg);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
_assert_msg_(DSPLLE, 0, "unsupported memory size");
|
||||
break;
|
||||
|
@ -1008,7 +924,6 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg)
|
|||
case 4:
|
||||
emitter.MOV(32, reg, Imm32((u32) arg.offset));
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case 8:
|
||||
if ((u32) arg.offset == arg.offset)
|
||||
{
|
||||
|
@ -1019,7 +934,6 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg)
|
|||
emitter.MOV(64, reg, Imm64(arg.offset));
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
_assert_msg_(DSPLLE, 0, "unsupported memory size");
|
||||
break;
|
||||
|
@ -1035,11 +949,9 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg)
|
|||
case 4:
|
||||
emitter.MOV(32, reg, arg);
|
||||
break;
|
||||
#if _M_X86_64
|
||||
case 8:
|
||||
emitter.MOV(64, reg, arg);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
_assert_msg_(DSPLLE, 0, "unsupported memory size");
|
||||
break;
|
||||
|
@ -1051,11 +963,7 @@ void DSPJitRegCache::writeReg(int dreg, OpArg arg)
|
|||
//ordered by preference
//not all of these are actually available
|
||||
static X64Reg alloc_order[] = {
|
||||
#if _M_X86_64
|
||||
R8,R9,R10,R11,R12,R13,R14,R15,RSI,RDI,RBX,RCX,RDX,RAX,RBP
|
||||
#else
|
||||
ESI,EDI,EBX,ECX,EDX,EAX,EBP
|
||||
#endif
|
||||
};
|
||||
|
||||
X64Reg DSPJitRegCache::spillXReg()
|
||||
|
|
|
@ -12,14 +12,10 @@ enum DSPJitRegSpecial
|
|||
{
|
||||
DSP_REG_AX0_32 =32,
|
||||
DSP_REG_AX1_32 =33,
|
||||
#if _M_X86_64
|
||||
DSP_REG_ACC0_64 =34,
|
||||
DSP_REG_ACC1_64 =35,
|
||||
DSP_REG_PROD_64 =36,
|
||||
DSP_REG_MAX_MEM_BACKED = 36,
|
||||
#else
|
||||
DSP_REG_MAX_MEM_BACKED = 33,
|
||||
#endif
|
||||
|
||||
DSP_REG_USED =253,
|
||||
DSP_REG_STATIC =254,
|
||||
|
@ -33,11 +29,7 @@ enum DSPJitSignExtend
|
|||
NONE
|
||||
};
|
||||
|
||||
#if _M_X86_64
|
||||
#define NUMXREGS 16
|
||||
#else
|
||||
#define NUMXREGS 8
|
||||
#endif
|
||||
|
||||
class DSPJitRegCache
|
||||
{
|
||||
|
|
|
@ -25,11 +25,7 @@ void DSPEmitter::dsp_reg_stack_push(int stack_reg)
gpr.getFreeXReg(tmp1);
//g_dsp.reg_stack[stack_reg][g_dsp.reg_stack_ptr[stack_reg]] = g_dsp.r[DSP_REG_ST0 + stack_reg];
MOV(16, R(tmp1), M(&g_dsp.r.st[stack_reg]));
#if _M_X86_64
MOVZX(64, 8, RAX, R(AL));
#else
MOVZX(32, 8, EAX, R(AL));
#endif
MOV(16, MComplex(EAX, EAX, 1,
PtrOffset(&g_dsp.reg_stack[stack_reg][0],nullptr)), R(tmp1));
gpr.putXReg(tmp1);
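The store above is the emitted form of the commented assignment: the stack index sits in EAX, and MComplex(EAX, EAX, 1, base) forms base + EAX + EAX*1, i.e. base + 2*index, the address of a u16 slot. Host-side meaning, with illustrative array sizes (the real layout lives in g_dsp):

#include <cstdint>

uint16_t reg_stack[4][32];   // placeholder sizes, not the real DSP stack depth
uint8_t  reg_stack_ptr[4];
uint16_t st[4];

void reg_stack_push(int stack_reg)
{
	reg_stack[stack_reg][reg_stack_ptr[stack_reg]] = st[stack_reg];
}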
|
||||
|
@ -44,11 +40,7 @@ void DSPEmitter::dsp_reg_stack_pop(int stack_reg)
|
|||
MOV(8, R(AL), M(&g_dsp.reg_stack_ptr[stack_reg]));
|
||||
X64Reg tmp1;
|
||||
gpr.getFreeXReg(tmp1);
|
||||
#if _M_X86_64
|
||||
MOVZX(64, 8, RAX, R(AL));
|
||||
#else
|
||||
MOVZX(32, 8, EAX, R(AL));
|
||||
#endif
|
||||
MOV(16, R(tmp1), MComplex(EAX, EAX, 1,
|
||||
PtrOffset(&g_dsp.reg_stack[stack_reg][0],nullptr)));
|
||||
MOV(16, M(&g_dsp.r.st[stack_reg]), R(tmp1));
|
||||
|
@ -207,18 +199,10 @@ void DSPEmitter::dsp_op_read_reg_dont_saturate(int reg, Gen::X64Reg host_dreg, D
|
|||
switch (extend)
|
||||
{
|
||||
case SIGN:
|
||||
#if _M_X86_64
|
||||
MOVSX(64, 16, host_dreg, R(host_dreg));
|
||||
#else
|
||||
MOVSX(32, 16, host_dreg, R(host_dreg));
|
||||
#endif
|
||||
break;
|
||||
case ZERO:
|
||||
#if _M_X86_64
|
||||
MOVZX(64, 16, host_dreg, R(host_dreg));
|
||||
#else
|
||||
MOVZX(32, 16, host_dreg, R(host_dreg));
|
||||
#endif
|
||||
break;
|
||||
case NONE:
|
||||
default:
|
||||
|
@ -243,18 +227,10 @@ void DSPEmitter::dsp_op_read_reg(int reg, Gen::X64Reg host_dreg, DSPJitSignExten
|
|||
switch (extend)
|
||||
{
|
||||
case SIGN:
|
||||
#if _M_X86_64
|
||||
MOVSX(64, 16, host_dreg, R(host_dreg));
|
||||
#else
|
||||
MOVSX(32, 16, host_dreg, R(host_dreg));
|
||||
#endif
|
||||
break;
|
||||
case ZERO:
|
||||
#if _M_X86_64
|
||||
MOVZX(64, 16, host_dreg, R(host_dreg));
|
||||
#else
|
||||
MOVZX(32, 16, host_dreg, R(host_dreg));
|
||||
#endif
|
||||
break;
|
||||
case NONE:
|
||||
default:
|
||||
|
@ -265,12 +241,8 @@ void DSPEmitter::dsp_op_read_reg(int reg, Gen::X64Reg host_dreg, DSPJitSignExten
|
|||
case DSP_REG_ACM1:
|
||||
{
|
||||
//we already know this is ACCM0 or ACCM1
|
||||
#if _M_X86_64
|
||||
OpArg acc_reg;
|
||||
gpr.getReg(reg-DSP_REG_ACM0+DSP_REG_ACC0_64, acc_reg);
|
||||
#else
|
||||
gpr.readReg(reg, host_dreg, extend);
|
||||
#endif
|
||||
OpArg sr_reg;
|
||||
gpr.getReg(DSP_REG_SR,sr_reg);
|
||||
|
||||
|
@ -278,8 +250,6 @@ void DSPEmitter::dsp_op_read_reg(int reg, Gen::X64Reg host_dreg, DSPJitSignExten
|
|||
TEST(16, sr_reg, Imm16(SR_40_MODE_BIT));
|
||||
FixupBranch not_40bit = J_CC(CC_Z, true);
|
||||
|
||||
|
||||
#if _M_X86_64
|
||||
MOVSX(64,32,host_dreg,acc_reg);
|
||||
CMP(64,R(host_dreg),acc_reg);
|
||||
FixupBranch no_saturate = J_CC(CC_Z);
|
||||
|
@ -309,42 +279,6 @@ void DSPEmitter::dsp_op_read_reg(int reg, Gen::X64Reg host_dreg, DSPJitSignExten
|
|||
SetJumpTarget(done_negative);
|
||||
gpr.flushRegs(c);
|
||||
gpr.putReg(reg-DSP_REG_ACM0+DSP_REG_ACC0_64, false);
|
||||
#else
|
||||
DSPJitRegCache c2(gpr);
|
||||
gpr.putReg(DSP_REG_SR, false);
|
||||
X64Reg tmp1;
|
||||
gpr.getFreeXReg(tmp1);
|
||||
gpr.readReg(reg-DSP_REG_ACM0+DSP_REG_ACH0, tmp1, NONE);
|
||||
MOVSX(32,16,host_dreg,R(host_dreg));
|
||||
SHL(32, R(tmp1), Imm8(16));
|
||||
MOV(16,R(tmp1),R(host_dreg));
|
||||
CMP(32,R(host_dreg), R(tmp1));
|
||||
|
||||
FixupBranch no_saturate = J_CC(CC_Z);
|
||||
|
||||
CMP(32,R(tmp1),Imm32(0));
|
||||
FixupBranch negative = J_CC(CC_LE);
|
||||
|
||||
MOV(32,R(host_dreg),Imm32(0x7fff));//this works for all extend modes
|
||||
FixupBranch done_positive = J();
|
||||
|
||||
SetJumpTarget(negative);
|
||||
if (extend == NONE || extend == ZERO)
|
||||
MOV(32,R(host_dreg),Imm32(0x00008000));
|
||||
else
|
||||
MOV(32,R(host_dreg),Imm32(0xffff8000));
|
||||
FixupBranch done_negative = J();
|
||||
|
||||
SetJumpTarget(no_saturate);
|
||||
if (extend == ZERO)
|
||||
MOVZX(32,16,host_dreg,R(host_dreg));
|
||||
SetJumpTarget(done_positive);
|
||||
SetJumpTarget(done_negative);
|
||||
gpr.putXReg(tmp1);
|
||||
gpr.flushRegs(c2);
|
||||
SetJumpTarget(not_40bit);
|
||||
gpr.flushRegs(c);
|
||||
#endif
|
||||
|
||||
gpr.putReg(DSP_REG_SR, false);
|
||||
}
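When SR_40_MODE_BIT is set, reading $acX.m saturates if the full accumulator does not fit in 32 bits; the removed 32-bit branch spells that logic out, while the 64-bit path does the same with a single MOVSX/CMP pair. A loose sketch of the rule, reconstructed from the branches above (not the exact emitter output; extension-mode details omitted):

#include <cstdint>

int16_t read_acm_saturated(int64_t acc40)
{
	if ((int64_t)(int32_t)acc40 != acc40)             // MOVSX(64,32,...); CMP(64,...)
		return acc40 > 0 ? 0x7fff : (int16_t)0x8000;  // clamp according to sign
	return (int16_t)(acc40 >> 16);                    // otherwise the plain $acX.m read
}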
|
||||
|
@ -585,11 +519,7 @@ void DSPEmitter::dmem_write(X64Reg value)
|
|||
|
||||
// g_dsp.dram[addr & DSP_DRAM_MASK] = val;
|
||||
AND(16, R(EAX), Imm16(DSP_DRAM_MASK));
|
||||
#if _M_X86_64
|
||||
MOV(64, R(ECX), ImmPtr(g_dsp.dram));
|
||||
#else
|
||||
MOV(32, R(ECX), ImmPtr(g_dsp.dram));
|
||||
#endif
|
||||
MOV(16, MComplex(ECX, EAX, 2, 0), R(value));
|
||||
|
||||
FixupBranch end = J(true);
|
||||
|
@ -610,12 +540,8 @@ void DSPEmitter::dmem_write_imm(u16 address, X64Reg value)
|
|||
switch (address >> 12)
|
||||
{
|
||||
case 0x0: // 0xxx DRAM
|
||||
#if _M_X86_64
|
||||
MOV(64, R(RDX), ImmPtr(g_dsp.dram));
|
||||
MOV(16, MDisp(RDX, (address & DSP_DRAM_MASK)*2), R(value));
|
||||
#else
|
||||
MOV(16, M(&g_dsp.dram[address & DSP_DRAM_MASK]), R(value));
|
||||
#endif
|
||||
break;
|
||||
|
||||
case 0xf: // Fxxx HW regs
|
||||
|
@ -644,11 +570,7 @@ void DSPEmitter::imem_read(X64Reg address)
|
|||
FixupBranch irom = J_CC(CC_A);
|
||||
// return g_dsp.iram[addr & DSP_IRAM_MASK];
|
||||
AND(16, R(address), Imm16(DSP_IRAM_MASK));
|
||||
#if _M_X86_64
|
||||
MOV(64, R(ECX), ImmPtr(g_dsp.iram));
|
||||
#else
|
||||
MOV(32, R(ECX), ImmPtr(g_dsp.iram));
|
||||
#endif
|
||||
MOV(16, R(EAX), MComplex(ECX, address, 2, 0));
|
||||
|
||||
FixupBranch end = J();
|
||||
|
@ -656,11 +578,7 @@ void DSPEmitter::imem_read(X64Reg address)
|
|||
// else if (addr == 0x8)
|
||||
// return g_dsp.irom[addr & DSP_IROM_MASK];
|
||||
AND(16, R(address), Imm16(DSP_IROM_MASK));
|
||||
#if _M_X86_64
|
||||
MOV(64, R(ECX), ImmPtr(g_dsp.irom));
|
||||
#else
|
||||
MOV(32, R(ECX), ImmPtr(g_dsp.irom));
|
||||
#endif
|
||||
MOV(16, R(EAX), MComplex(ECX, address, 2, 0));
|
||||
|
||||
SetJumpTarget(end);
|
||||
|
@ -676,12 +594,8 @@ void DSPEmitter::dmem_read(X64Reg address)
|
|||
FixupBranch dram = J_CC(CC_A);
|
||||
// return g_dsp.dram[addr & DSP_DRAM_MASK];
|
||||
AND(32, R(address), Imm32(DSP_DRAM_MASK));
|
||||
#if _M_X86_64
|
||||
MOVZX(64, 16, address, R(address));
|
||||
MOV(64, R(ECX), ImmPtr(g_dsp.dram));
|
||||
#else
|
||||
MOV(32, R(ECX), ImmPtr(g_dsp.dram));
|
||||
#endif
|
||||
MOV(16, R(EAX), MComplex(ECX, address, 2, 0));
|
||||
|
||||
FixupBranch end = J(true);
|
||||
|
@ -691,12 +605,8 @@ void DSPEmitter::dmem_read(X64Reg address)
|
|||
FixupBranch ifx = J_CC(CC_A);
|
||||
// return g_dsp.coef[addr & DSP_COEF_MASK];
|
||||
AND(32, R(address), Imm32(DSP_COEF_MASK));
|
||||
#if _M_X86_64
|
||||
MOVZX(64, 16, address, R(address));
|
||||
MOV(64, R(ECX), ImmPtr(g_dsp.coef));
|
||||
#else
|
||||
MOV(32, R(ECX), ImmPtr(g_dsp.coef));
|
||||
#endif
|
||||
MOV(16, R(EAX), MComplex(ECX, address, 2, 0));
|
||||
|
||||
FixupBranch end2 = J(true);
|
||||
|
@ -718,21 +628,13 @@ void DSPEmitter::dmem_read_imm(u16 address)
|
|||
switch (address >> 12)
|
||||
{
|
||||
case 0x0: // 0xxx DRAM
|
||||
#if _M_X86_64
|
||||
MOV(64, R(RDX), ImmPtr(g_dsp.dram));
|
||||
MOV(16, R(EAX), MDisp(RDX, (address & DSP_DRAM_MASK)*2));
|
||||
#else
|
||||
MOV(16, R(EAX), M(&g_dsp.dram[address & DSP_DRAM_MASK]));
|
||||
#endif
|
||||
break;
|
||||
|
||||
case 0x1: // 1xxx COEF
|
||||
#if _M_X86_64
|
||||
MOV(64, R(RDX), ImmPtr(g_dsp.coef));
|
||||
MOV(16, R(EAX), MDisp(RDX, (address & DSP_COEF_MASK)*2));
|
||||
#else
|
||||
MOV(16, R(EAX), Imm16(g_dsp.coef[address & DSP_COEF_MASK]));
|
||||
#endif
|
||||
break;
|
||||
|
||||
case 0xf: // Fxxx HW regs
|
||||
|
@ -751,7 +653,6 @@ void DSPEmitter::dmem_read_imm(u16 address)
|
|||
// Returns s64 in RAX
|
||||
void DSPEmitter::get_long_prod(X64Reg long_prod)
|
||||
{
|
||||
#if _M_X86_64
|
||||
//s64 val = (s8)(u8)g_dsp.r[DSP_REG_PRODH];
|
||||
OpArg prod_reg;
|
||||
gpr.getReg(DSP_REG_PROD_64, prod_reg);
|
||||
|
@ -767,15 +668,12 @@ void DSPEmitter::get_long_prod(X64Reg long_prod)
|
|||
SHL(64, R(tmp), Imm8(16));
|
||||
ADD(64, R(long_prod), R(tmp));
|
||||
gpr.putXReg(tmp);
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
// Returns s64 in RAX
|
||||
// Clobbers RCX
|
||||
void DSPEmitter::get_long_prod_round_prodl(X64Reg long_prod)
|
||||
{
|
||||
#if _M_X86_64
|
||||
//s64 prod = dsp_get_long_prod();
|
||||
get_long_prod(long_prod);
|
||||
|
||||
|
@ -796,7 +694,6 @@ void DSPEmitter::get_long_prod_round_prodl(X64Reg long_prod)
|
|||
SetJumpTarget(_ret);
|
||||
//return prod;
|
||||
gpr.putXReg(tmp);
|
||||
#endif
|
||||
}
|
||||
|
||||
// For accurate emulation, this is wrong - but the real prod registers behave
|
||||
|
@ -804,7 +701,6 @@ void DSPEmitter::get_long_prod_round_prodl(X64Reg long_prod)
|
|||
// In: RAX = s64 val
|
||||
void DSPEmitter::set_long_prod()
|
||||
{
|
||||
#if _M_X86_64
|
||||
X64Reg tmp;
|
||||
gpr.getFreeXReg(tmp);
|
||||
|
||||
|
@ -817,14 +713,12 @@ void DSPEmitter::set_long_prod()
|
|||
MOV(64, prod_reg, R(RAX));
|
||||
|
||||
gpr.putReg(DSP_REG_PROD_64, true);
|
||||
#endif
|
||||
}
|
||||
|
||||
// Returns s64 in RAX
|
||||
// Clobbers RCX
|
||||
void DSPEmitter::round_long_acc(X64Reg long_acc)
{
#if _M_X86_64
//if (prod & 0x10000) prod = (prod + 0x8000) & ~0xffff;
TEST(32, R(long_acc), Imm32(0x10000));
FixupBranch jump = J_CC(CC_Z);

@ -839,29 +733,24 @@ void DSPEmitter::round_long_acc(X64Reg long_acc)
AND(64, R(long_acc), R(RCX));
SetJumpTarget(_ret);
//return prod;
#endif
}
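The commented line is the rounding rule round_long_acc() emits for the branch visible in this hunk (the other branch is elided by the diff): when bit 16 is set, add 0x8000 and clear the low 16 bits. Sketch:

#include <cstdint>

int64_t round_if_bit16_set(int64_t acc)
{
	if (acc & 0x10000)                     // TEST(32, R(long_acc), Imm32(0x10000))
		acc = (acc + 0x8000) & ~0xffffLL;  // round up, then drop the low word
	return acc;
}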
|
||||
|
||||
// Returns s64 in acc
|
||||
void DSPEmitter::get_long_acc(int _reg, X64Reg acc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
OpArg reg;
|
||||
gpr.getReg(DSP_REG_ACC0_64+_reg, reg);
|
||||
MOV(64, R(acc), reg);
|
||||
gpr.putReg(DSP_REG_ACC0_64+_reg, false);
|
||||
#endif
|
||||
}
|
||||
|
||||
// In: acc = s64 val
|
||||
void DSPEmitter::set_long_acc(int _reg, X64Reg acc)
|
||||
{
|
||||
#if _M_X86_64
|
||||
OpArg reg;
|
||||
gpr.getReg(DSP_REG_ACC0_64+_reg, reg, false);
|
||||
MOV(64, reg, R(acc));
|
||||
gpr.putReg(DSP_REG_ACC0_64+_reg);
|
||||
#endif
|
||||
}
|
||||
|
||||
// Returns s16 in AX
|
||||
|
|
|
@ -247,13 +247,8 @@ static void ImHere()
|
|||
if (ImHereLog)
|
||||
{
|
||||
if (!f)
|
||||
{
|
||||
#if _M_X86_64
|
||||
f.Open("log64.txt", "w");
|
||||
#else
|
||||
f.Open("log32.txt", "w");
|
||||
#endif
|
||||
}
|
||||
|
||||
fprintf(f.GetHandle(), "%08x\n", PC);
|
||||
}
|
||||
if (been_here.find(PC) != been_here.end())
|
||||
|
@ -651,12 +646,8 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc
|
|||
OR(32, M((void *)&PowerPC::ppcState.Exceptions), Imm32(EXCEPTION_ISI));

// Remove the invalid instruction from the icache, forcing a recompile
#if _M_X86_32
MOV(32, M(jit->GetBlockCache()->GetICachePtr(js.compilerPC)), Imm32(JIT_ICACHE_INVALID_WORD));
#else
MOV(64, R(RAX), ImmPtr(jit->GetBlockCache()->GetICachePtr(js.compilerPC)));
MOV(32,MatR(RAX),Imm32(JIT_ICACHE_INVALID_WORD));
#endif

WriteExceptionExit();
}
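The two-instruction form in the 64-bit path exists because x86-64 cannot store an immediate through a full 64-bit absolute address in one instruction, so the icache pointer is materialized in RAX first. The host-side effect (sketch):

#include <cstdint>

void invalidate_icache_word(uint32_t* icache_ptr, uint32_t invalid_word)
{
	*icache_ptr = invalid_word;   // MOV(32, MatR(RAX), Imm32(JIT_ICACHE_INVALID_WORD))
}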
|
||||
|
|
|
@ -76,11 +76,7 @@ public:
|
|||
}
|
||||
|
||||
const char *GetName() override {
|
||||
#if _M_X86_64
|
||||
return "JIT64";
|
||||
#else
|
||||
return "JIT32";
|
||||
#endif
|
||||
}
|
||||
// Run!
|
||||
|
||||
|
|
|
@ -37,11 +37,10 @@ void Jit64AsmRoutineManager::Generate()
|
|||
{
|
||||
enterCode = AlignCode16();
|
||||
ABI_PushAllCalleeSavedRegsAndAdjustStack();
|
||||
#if _M_X86_64
|
||||
|
||||
// Two statically allocated registers.
|
||||
MOV(64, R(RBX), Imm64((u64)Memory::base));
|
||||
MOV(64, R(R15), Imm64((u64)jit->GetBlockCache()->GetCodePointers())); //It's below 2GB so 32 bits are good enough
|
||||
#endif
|
||||
|
||||
const u8* outerLoop = GetCodePtr();
|
||||
ABI_CallFunction(reinterpret_cast<void *>(&CoreTiming::Advance));
|
||||
|
@ -85,12 +84,9 @@ void Jit64AsmRoutineManager::Generate()
|
|||
no_mem = J_CC(CC_NZ);
|
||||
}
|
||||
AND(32, R(EAX), Imm32(JIT_ICACHE_MASK));
|
||||
#if _M_X86_32
|
||||
MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCache));
|
||||
#else
|
||||
MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCache));
|
||||
MOV(32, R(EAX), MComplex(RSI, EAX, SCALE_1, 0));
|
||||
#endif
|
||||
|
||||
if (Core::g_CoreStartupParameter.bWii || Core::g_CoreStartupParameter.bMMU || Core::g_CoreStartupParameter.bTLBHack)
|
||||
{
|
||||
exit_mem = J();
|
||||
|
@ -101,12 +97,9 @@ void Jit64AsmRoutineManager::Generate()
|
|||
TEST(32, R(EAX), Imm32(JIT_ICACHE_VMEM_BIT));
|
||||
FixupBranch no_vmem = J_CC(CC_Z);
|
||||
AND(32, R(EAX), Imm32(JIT_ICACHE_MASK));
|
||||
#if _M_X86_32
|
||||
MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCacheVMEM));
|
||||
#else
|
||||
MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCacheVMEM));
|
||||
MOV(32, R(EAX), MComplex(RSI, EAX, SCALE_1, 0));
|
||||
#endif
|
||||
|
||||
if (Core::g_CoreStartupParameter.bWii) exit_vmem = J();
|
||||
SetJumpTarget(no_vmem);
|
||||
}
|
||||
|
@ -115,12 +108,9 @@ void Jit64AsmRoutineManager::Generate()
|
|||
TEST(32, R(EAX), Imm32(JIT_ICACHE_EXRAM_BIT));
|
||||
FixupBranch no_exram = J_CC(CC_Z);
|
||||
AND(32, R(EAX), Imm32(JIT_ICACHEEX_MASK));
|
||||
#if _M_X86_32
|
||||
MOV(32, R(EAX), MDisp(EAX, (u32)jit->GetBlockCache()->iCacheEx));
|
||||
#else
|
||||
MOV(64, R(RSI), Imm64((u64)jit->GetBlockCache()->iCacheEx));
|
||||
MOV(32, R(EAX), MComplex(RSI, EAX, SCALE_1, 0));
|
||||
#endif
|
||||
|
||||
SetJumpTarget(no_exram);
|
||||
}
|
||||
if (Core::g_CoreStartupParameter.bWii || Core::g_CoreStartupParameter.bMMU || Core::g_CoreStartupParameter.bTLBHack)
|
||||
|
@ -136,24 +126,13 @@ void Jit64AsmRoutineManager::Generate()
|
|||
ADD(32, M(&PowerPC::ppcState.DebugCount), Imm8(1));
|
||||
}
|
||||
//grab from list and jump to it
#if _M_X86_32
MOV(32, R(EDX), ImmPtr(jit->GetBlockCache()->GetCodePointers()));
JMPptr(MComplex(EDX, EAX, 4, 0));
#else
JMPptr(MComplex(R15, RAX, 8, 0));
#endif
SetJumpTarget(notfound);

//Ok, no block, let's jit
#if _M_X86_32
ABI_AlignStack(4);
PUSH(32, M(&PowerPC::ppcState.pc));
CALL(reinterpret_cast<void *>(&Jit));
ABI_RestoreStack(4);
#else
MOV(32, R(ABI_PARAM1), M(&PowerPC::ppcState.pc));
CALL((void *)&Jit);
#endif

JMP(dispatcherNoCheck); // no point in special casing this
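The "grab from list and jump to it" path above is a plain table dispatch: R15 was loaded with GetBlockCache()->GetCodePointers() in the prologue, EAX holds the block number read from the icache, and the scale is 8 because the entries are 64-bit pointers. Equivalent host-side lookup (sketch; BlockEntry is an illustrative alias):

#include <cstdint>

using BlockEntry = const uint8_t*;

BlockEntry lookup_block(const BlockEntry* code_pointers, uint32_t block_num)
{
	return code_pointers[block_num];   // JMPptr(MComplex(R15, RAX, 8, 0))
}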
|
||||
|
||||
SetJumpTarget(bail);
|
||||
|
|
|
@ -172,14 +172,10 @@ const int* GPRRegCache::GetAllocationOrder(size_t& count)
|
|||
static const int allocationOrder[] =
|
||||
{
|
||||
// R12, when used as base register, for example in a LEA, can generate bad code! Need to look into this.
|
||||
#if _M_X86_64
|
||||
#ifdef _WIN32
|
||||
RSI, RDI, R13, R14, R8, R9, R10, R11, R12, //, RCX
|
||||
#else
|
||||
RBP, R13, R14, R8, R9, R10, R11, R12, //, RCX
|
||||
#endif
|
||||
#elif _M_X86_32
|
||||
ESI, EDI, EBX, EBP, EDX, ECX,
|
||||
#endif
|
||||
};
|
||||
count = sizeof(allocationOrder) / sizeof(const int);
|
||||
|
@ -190,11 +186,7 @@ const int* FPURegCache::GetAllocationOrder(size_t& count)
|
|||
{
|
||||
static const int allocationOrder[] =
|
||||
{
|
||||
#if _M_X86_64
|
||||
XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5
|
||||
#elif _M_X86_32
|
||||
XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
|
||||
#endif
|
||||
};
|
||||
count = sizeof(allocationOrder) / sizeof(int);
|
||||
return allocationOrder;
|
||||
|
|
|
@ -33,11 +33,7 @@ struct X64CachedReg
|
|||
typedef int XReg;
|
||||
typedef int PReg;
|
||||
|
||||
#if _M_X86_64
|
||||
#define NUMXREGS 16
|
||||
#elif _M_X86_32
|
||||
#define NUMXREGS 8
|
||||
#endif
|
||||
|
||||
class RegCache
|
||||
{
|
||||
|
|
|
@ -1171,7 +1171,6 @@ void Jit64::divwux(UGeckoInstruction inst)
|
|||
if (((u64)(magic+1) * (max_quotient*divisor-1)) >> (shift + 32) != max_quotient-1)
|
||||
{
|
||||
// If failed, use slower round-down method
|
||||
#if _M_X86_64
|
||||
gpr.Lock(a, b, d);
|
||||
gpr.BindToRegister(d, d == a, true);
|
||||
MOV(32, R(EAX), Imm32(magic));
|
||||
|
@ -1180,24 +1179,10 @@ void Jit64::divwux(UGeckoInstruction inst)
|
|||
IMUL(64, gpr.RX(d), R(RAX));
|
||||
ADD(64, gpr.R(d), R(RAX));
|
||||
SHR(64, gpr.R(d), Imm8(shift+32));
|
||||
#else
|
||||
gpr.FlushLockX(EDX);
|
||||
gpr.Lock(a, b, d);
|
||||
gpr.BindToRegister(d, d == a, true);
|
||||
MOV(32, R(EAX), Imm32(magic));
|
||||
MUL(32, gpr.R(a));
|
||||
XOR(32, gpr.R(d), gpr.R(d));
|
||||
ADD(32, R(EAX), Imm32(magic));
|
||||
ADC(32, gpr.R(d), R(EDX));
|
||||
if (shift)
|
||||
SHR(32, gpr.R(d), Imm8(shift));
|
||||
gpr.UnlockAllX();
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
// If success, use faster round-up method
|
||||
#if _M_X86_64
|
||||
gpr.Lock(a, b, d);
|
||||
gpr.BindToRegister(a, true, false);
|
||||
gpr.BindToRegister(d, false, true);
|
||||
|
@ -1212,17 +1197,6 @@ void Jit64::divwux(UGeckoInstruction inst)
|
|||
IMUL(64, gpr.RX(d), gpr.R(a));
|
||||
}
|
||||
SHR(64, gpr.R(d), Imm8(shift+32));
|
||||
#else
|
||||
gpr.FlushLockX(EDX);
|
||||
gpr.Lock(a, b, d);
|
||||
gpr.BindToRegister(d, d == a, true);
|
||||
MOV(32, R(EAX), Imm32(magic+1));
|
||||
MUL(32, gpr.R(a));
|
||||
MOV(32, gpr.R(d), R(EDX));
|
||||
if (shift)
|
||||
SHR(32, gpr.R(d), Imm8(shift));
|
||||
gpr.UnlockAllX();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
if (inst.OE)
|
||||
|
@ -1753,7 +1727,6 @@ void Jit64::srwx(UGeckoInstruction inst)
|
|||
}
|
||||
else
|
||||
{
|
||||
#if _M_X86_64
|
||||
gpr.FlushLockX(ECX);
|
||||
gpr.Lock(a, b, s);
|
||||
gpr.BindToRegister(a, (a == b || a == s), true);
|
||||
|
@ -1765,23 +1738,6 @@ void Jit64::srwx(UGeckoInstruction inst)
|
|||
SHR(64, gpr.R(a), R(ECX));
|
||||
gpr.UnlockAll();
|
||||
gpr.UnlockAllX();
|
||||
#else
|
||||
gpr.FlushLockX(ECX);
|
||||
gpr.Lock(a, b, s);
|
||||
gpr.BindToRegister(a, (a == b || a == s), true);
|
||||
MOV(32, R(ECX), gpr.R(b));
|
||||
TEST(32, R(ECX), Imm32(32));
|
||||
if (a != s)
|
||||
{
|
||||
MOV(32, gpr.R(a), gpr.R(s));
|
||||
}
|
||||
FixupBranch branch = J_CC(CC_Z);
|
||||
XOR(32, gpr.R(a), gpr.R(a));
|
||||
SetJumpTarget(branch);
|
||||
SHR(32, gpr.R(a), R(ECX));
|
||||
gpr.UnlockAll();
|
||||
gpr.UnlockAllX();
|
||||
#endif
|
||||
}
|
||||
// Shift of 0 doesn't update flags, so compare manually just in case
|
||||
if (inst.Rc)
|
||||
|
@ -1809,7 +1765,6 @@ void Jit64::slwx(UGeckoInstruction inst)
|
|||
}
|
||||
else
|
||||
{
|
||||
#if _M_X86_64
|
||||
gpr.FlushLockX(ECX);
|
||||
gpr.Lock(a, b, s);
|
||||
gpr.BindToRegister(a, (a == b || a == s), true);
|
||||
|
@ -1830,28 +1785,6 @@ void Jit64::slwx(UGeckoInstruction inst)
|
|||
}
|
||||
gpr.UnlockAll();
|
||||
gpr.UnlockAllX();
|
||||
#else
|
||||
gpr.FlushLockX(ECX);
|
||||
gpr.Lock(a, b, s);
|
||||
gpr.BindToRegister(a, (a == b || a == s), true);
|
||||
MOV(32, R(ECX), gpr.R(b));
|
||||
TEST(32, R(ECX), Imm32(32));
|
||||
if (a != s)
|
||||
{
|
||||
MOV(32, gpr.R(a), gpr.R(s));
|
||||
}
|
||||
FixupBranch branch = J_CC(CC_Z);
|
||||
XOR(32, gpr.R(a), gpr.R(a));
|
||||
SetJumpTarget(branch);
|
||||
SHL(32, gpr.R(a), R(ECX));
|
||||
gpr.UnlockAll();
|
||||
gpr.UnlockAllX();
|
||||
// Shift of 0 doesn't update flags, so compare manually just in case
|
||||
if (inst.Rc)
|
||||
{
|
||||
ComputeRC(gpr.R(a));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1863,7 +1796,6 @@ void Jit64::srawx(UGeckoInstruction inst)
|
|||
int a = inst.RA;
|
||||
int b = inst.RB;
|
||||
int s = inst.RS;
|
||||
#if _M_X86_64
|
||||
gpr.Lock(a, s, b);
|
||||
gpr.FlushLockX(ECX);
|
||||
gpr.BindToRegister(a, (a == s || a == b), true);
|
||||
|
@ -1881,33 +1813,6 @@ void Jit64::srawx(UGeckoInstruction inst)
|
|||
SetJumpTarget(nocarry);
|
||||
gpr.UnlockAll();
|
||||
gpr.UnlockAllX();
|
||||
#else
|
||||
gpr.Lock(a, s, b);
|
||||
gpr.FlushLockX(ECX);
|
||||
gpr.BindToRegister(a, (a == s || a == b), true);
|
||||
JitClearCA();
|
||||
MOV(32, R(ECX), gpr.R(b));
|
||||
if (a != s)
|
||||
MOV(32, gpr.R(a), gpr.R(s));
|
||||
TEST(32, R(ECX), Imm32(32));
|
||||
FixupBranch topBitSet = J_CC(CC_NZ);
|
||||
XOR(32, R(EAX), R(EAX));
|
||||
SHRD(32, R(EAX), gpr.R(a), R(ECX));
|
||||
SAR(32, gpr.R(a), R(ECX));
|
||||
TEST(32, R(EAX), gpr.R(a));
|
||||
FixupBranch nocarry1 = J_CC(CC_Z);
|
||||
JitSetCA();
|
||||
FixupBranch end = J();
|
||||
SetJumpTarget(topBitSet);
|
||||
SAR(32, gpr.R(a), Imm8(31));
|
||||
FixupBranch nocarry2 = J_CC(CC_Z);
|
||||
JitSetCA();
|
||||
SetJumpTarget(end);
|
||||
SetJumpTarget(nocarry1);
|
||||
SetJumpTarget(nocarry2);
|
||||
gpr.UnlockAll();
|
||||
gpr.UnlockAllX();
|
||||
#endif
|
||||
if (inst.Rc) {
|
||||
ComputeRC(gpr.R(a));
|
||||
}
|
||||
|
|
|
@ -40,21 +40,17 @@ void Jit64::psq_st(UGeckoInstruction inst)
MOV(32, gpr.R(a), R(ECX));
MOVZX(32, 16, EAX, M(&PowerPC::ppcState.spr[SPR_GQR0 + inst.I]));
MOVZX(32, 8, EDX, R(AL));

// FIXME: Fix ModR/M encoding to allow [EDX*4+disp32] without a base register!
#if _M_X86_32
int addr_scale = SCALE_4;
#else
int addr_scale = SCALE_8;
#endif
if (inst.W) {
// One value
PXOR(XMM0, R(XMM0)); // TODO: See if we can get rid of this cheaply by tweaking the code in the singleStore* functions.
CVTSD2SS(XMM0, fpr.R(s));
CALLptr(MScaled(EDX, addr_scale, (u32)(u64)asm_routines.singleStoreQuantized));
CALLptr(MScaled(EDX, SCALE_8, (u32)(u64)asm_routines.singleStoreQuantized));
} else {
// Pair of values
CVTPD2PS(XMM0, fpr.R(s));
CALLptr(MScaled(EDX, addr_scale, (u32)(u64)asm_routines.pairedStoreQuantized));
CALLptr(MScaled(EDX, SCALE_8, (u32)(u64)asm_routines.pairedStoreQuantized));
}
gpr.UnlockAll();
gpr.UnlockAllX();
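The CALLptr(MScaled(EDX, SCALE_8, ...)) lines are an indexed call through the quantized-store routine table: EDX holds the store type taken from the GQR, and the scale is sizeof(void*), which is why the old SCALE_4/SCALE_8 switch collapses to SCALE_8 here. Host-side shape (sketch; the real entries are raw code pointers with a register-based calling convention, not ordinary C++ functions):

#include <cstdint>

using StoreRoutine = void (*)();

void dispatch_quantized_store(const StoreRoutine* pairedStoreQuantized, uint32_t type)
{
	pairedStoreQuantized[type]();   // CALLptr(MScaled(EDX, SCALE_8, table))
}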
|
||||
|
@ -83,13 +79,9 @@ void Jit64::psq_l(UGeckoInstruction inst)
|
|||
MOVZX(32, 8, EDX, R(AL));
|
||||
if (inst.W)
|
||||
OR(32, R(EDX), Imm8(8));
|
||||
#if _M_X86_32
|
||||
int addr_scale = SCALE_4;
|
||||
#else
|
||||
int addr_scale = SCALE_8;
|
||||
#endif
|
||||
|
||||
ABI_AlignStack(0);
|
||||
CALLptr(MScaled(EDX, addr_scale, (u32)(u64)asm_routines.pairedLoadQuantized));
|
||||
CALLptr(MScaled(EDX, SCALE_8, (u32)(u64)asm_routines.pairedLoadQuantized));
|
||||
ABI_RestoreStack(0);
|
||||
|
||||
// MEMCHECK_START // FIXME: MMU does not work here because of unsafe memory access
|
||||
|
|
|
@ -137,7 +137,6 @@ static void fregSpill(RegInfo& RI, X64Reg reg) {
|
|||
}
|
||||
|
||||
// ECX is scratch, so we don't allocate it
|
||||
#if _M_X86_64
|
||||
|
||||
// 64-bit - calling conventions differ between linux & windows, so...
|
||||
#ifdef _WIN32
|
||||
|
@ -149,16 +148,6 @@ static const int RegAllocSize = sizeof(RegAllocOrder) / sizeof(X64Reg);
|
|||
static const X64Reg FRegAllocOrder[] = {XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5};
|
||||
static const int FRegAllocSize = sizeof(FRegAllocOrder) / sizeof(X64Reg);
|
||||
|
||||
#else
|
||||
|
||||
// 32-bit
|
||||
static const X64Reg RegAllocOrder[] = {EDI, ESI, EBP, EBX, EDX, EAX};
|
||||
static const int RegAllocSize = sizeof(RegAllocOrder) / sizeof(X64Reg);
|
||||
static const X64Reg FRegAllocOrder[] = {XMM2, XMM3, XMM4, XMM5, XMM6, XMM7};
|
||||
static const int FRegAllocSize = sizeof(FRegAllocOrder) / sizeof(X64Reg);
|
||||
|
||||
#endif
|
||||
|
||||
static X64Reg regFindFreeReg(RegInfo& RI) {
|
||||
for (auto& reg : RegAllocOrder)
|
||||
if (RI.regs[reg] == nullptr)
|
||||
|
@ -256,13 +245,6 @@ static X64Reg fregEnsureInReg(RegInfo& RI, InstLoc I) {
|
|||
}
|
||||
|
||||
static void regSpillCallerSaved(RegInfo& RI) {
|
||||
#if _M_X86_32
|
||||
// 32-bit
|
||||
regSpill(RI, EDX);
|
||||
regSpill(RI, ECX);
|
||||
regSpill(RI, EAX);
|
||||
#else
|
||||
// 64-bit
|
||||
regSpill(RI, RCX);
|
||||
regSpill(RI, RDX);
|
||||
regSpill(RI, RSI);
|
||||
|
@ -271,7 +253,6 @@ static void regSpillCallerSaved(RegInfo& RI) {
|
|||
regSpill(RI, R9);
|
||||
regSpill(RI, R10);
|
||||
regSpill(RI, R11);
|
||||
#endif
|
||||
}
|
||||
|
||||
static X64Reg regUReg(RegInfo& RI, InstLoc I) {
|
||||
|
@ -1292,13 +1273,9 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) {
|
|||
Jit->MOVZX(32, 16, EAX, M(((char *)&GQR(quantreg)) + 2));
|
||||
Jit->MOVZX(32, 8, EDX, R(AL));
|
||||
Jit->OR(32, R(EDX), Imm8(w << 3));
|
||||
#if _M_X86_32
|
||||
int addr_scale = SCALE_4;
|
||||
#else
|
||||
int addr_scale = SCALE_8;
|
||||
#endif
|
||||
|
||||
Jit->MOV(32, R(ECX), regLocForInst(RI, getOp1(I)));
|
||||
Jit->CALLptr(MScaled(EDX, addr_scale, (u32)(u64)(((JitIL *)jit)->asm_routines.pairedLoadQuantized)));
|
||||
Jit->CALLptr(MScaled(EDX, SCALE_8, (u32)(u64)(((JitIL *)jit)->asm_routines.pairedLoadQuantized)));
|
||||
Jit->MOVAPD(reg, R(XMM0));
|
||||
RI.fregs[reg] = I;
|
||||
regNormalRegClear(RI, I);
|
||||
|
@ -1342,14 +1319,10 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) {
|
|||
u32 quantreg = *I >> 24;
|
||||
Jit->MOVZX(32, 16, EAX, M(&PowerPC::ppcState.spr[SPR_GQR0 + quantreg]));
|
||||
Jit->MOVZX(32, 8, EDX, R(AL));
|
||||
#if _M_X86_32
|
||||
int addr_scale = SCALE_4;
|
||||
#else
|
||||
int addr_scale = SCALE_8;
|
||||
#endif
|
||||
|
||||
Jit->MOV(32, R(ECX), regLocForInst(RI, getOp2(I)));
|
||||
Jit->MOVAPD(XMM0, fregLocForInst(RI, getOp1(I)));
|
||||
Jit->CALLptr(MScaled(EDX, addr_scale, (u32)(u64)(((JitIL *)jit)->asm_routines.pairedStoreQuantized)));
|
||||
Jit->CALLptr(MScaled(EDX, SCALE_8, (u32)(u64)(((JitIL *)jit)->asm_routines.pairedStoreQuantized)));
|
||||
if (RI.IInfo[I - RI.FirstI] & 4)
|
||||
fregClearInst(RI, getOp1(I));
|
||||
if (RI.IInfo[I - RI.FirstI] & 8)
|
||||
|
@ -1831,12 +1804,8 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress) {
|
|||
Jit->OR(32, M((void *)&PowerPC::ppcState.Exceptions), Imm32(EXCEPTION_ISI));
|
||||
|
||||
// Remove the invalid instruction from the icache, forcing a recompile
|
||||
#if _M_X86_32
|
||||
Jit->MOV(32, M(jit->GetBlockCache()->GetICachePtr(InstLoc)), Imm32(JIT_ICACHE_INVALID_WORD));
|
||||
#else
|
||||
Jit->MOV(64, R(RAX), ImmPtr(jit->GetBlockCache()->GetICachePtr(InstLoc)));
|
||||
Jit->MOV(32, MatR(RAX), Imm32(JIT_ICACHE_INVALID_WORD));
|
||||
#endif
|
||||
Jit->WriteExceptionExit();
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -346,11 +346,7 @@ static void ImHere()
|
|||
{
|
||||
if (!f)
|
||||
{
|
||||
#if _M_X86_64
|
||||
f.Open("log64.txt", "w");
|
||||
#else
|
||||
f.Open("log32.txt", "w");
|
||||
#endif
|
||||
}
|
||||
fprintf(f.GetHandle(), "%08x r0: %08x r5: %08x r6: %08x\n", PC, PowerPC::ppcState.gpr[0],
|
||||
PowerPC::ppcState.gpr[5], PowerPC::ppcState.gpr[6]);
|
||||
|
|
|
@ -36,7 +36,6 @@
|
|||
#include "Core/PowerPC/JitILCommon/IR.h"
|
||||
#include "Core/PowerPC/JitILCommon/JitILBase.h"
|
||||
|
||||
|
||||
class JitIL : public JitILBase, public EmuCodeBlock
|
||||
{
|
||||
private:
|
||||
|
|
|
@ -126,7 +126,6 @@ void CommonAsmRoutines::GenQuantizedStores()
|
|||
UD2();
|
||||
const u8* storePairedFloat = AlignCode4();
|
||||
|
||||
#if _M_X86_64
|
||||
SHUFPS(XMM0, R(XMM0), 1);
|
||||
MOVQ_xmm(M(&psTemp[0]), XMM0);
|
||||
TEST(32, R(ECX), Imm32(0x0C000000));
|
||||
|
@ -140,27 +139,6 @@ void CommonAsmRoutines::GenQuantizedStores()
|
|||
ABI_PopRegistersAndAdjustStack(QUANTIZED_REGS_TO_SAVE, true);
|
||||
SetJumpTarget(skip_complex);
|
||||
RET();
|
||||
#else
|
||||
TEST(32, R(ECX), Imm32(0x0C000000));
|
||||
FixupBranch argh = J_CC(CC_NZ, true);
|
||||
MOVQ_xmm(M(&psTemp[0]), XMM0);
|
||||
MOV(32, R(EAX), M(&psTemp));
|
||||
BSWAP(32, EAX);
|
||||
AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK));
|
||||
MOV(32, MDisp(ECX, (u32)Memory::base), R(EAX));
|
||||
MOV(32, R(EAX), M(((char*)&psTemp) + 4));
|
||||
BSWAP(32, EAX);
|
||||
MOV(32, MDisp(ECX, 4+(u32)Memory::base), R(EAX));
|
||||
FixupBranch arg2 = J(true);
|
||||
SetJumpTarget(argh);
|
||||
SHUFPS(XMM0, R(XMM0), 1);
|
||||
MOVQ_xmm(M(&psTemp[0]), XMM0);
|
||||
ABI_PushRegistersAndAdjustStack(QUANTIZED_REGS_TO_SAVE, true);
|
||||
ABI_CallFunctionR((void *)&WriteDual32, ECX);
|
||||
ABI_PopRegistersAndAdjustStack(QUANTIZED_REGS_TO_SAVE, true);
|
||||
SetJumpTarget(arg2);
|
||||
RET();
|
||||
#endif
|
||||
|
||||
const u8* storePairedU8 = AlignCode4();
|
||||
SHR(32, R(EAX), Imm8(6));
|
||||
|
@ -343,64 +321,24 @@ void CommonAsmRoutines::GenQuantizedLoads()
|
|||
|
||||
const u8* loadPairedFloatTwo = AlignCode4();
|
||||
if (cpu_info.bSSSE3) {
|
||||
#if _M_X86_64
|
||||
MOVQ_xmm(XMM0, MComplex(RBX, RCX, 1, 0));
|
||||
#else
|
||||
AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK));
|
||||
MOVQ_xmm(XMM0, MDisp(ECX, (u32)Memory::base));
|
||||
#endif
|
||||
PSHUFB(XMM0, M((void *)pbswapShuffle2x4));
|
||||
} else {
|
||||
#if _M_X86_64
|
||||
LoadAndSwap(64, RCX, MComplex(RBX, RCX, 1, 0));
|
||||
ROL(64, R(RCX), Imm8(32));
|
||||
MOVQ_xmm(XMM0, R(RCX));
|
||||
#else
|
||||
#if 0
|
||||
AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK));
|
||||
MOVQ_xmm(XMM0, MDisp(ECX, (u32)Memory::base));
|
||||
PXOR(XMM1, R(XMM1));
|
||||
PSHUFLW(XMM0, R(XMM0), 0xB1);
|
||||
MOVAPD(XMM1, R(XMM0));
|
||||
PSRLW(XMM0, 8);
|
||||
PSLLW(XMM1, 8);
|
||||
POR(XMM0, R(XMM1));
|
||||
#else
|
||||
AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK));
|
||||
MOV(32, R(EAX), MDisp(ECX, (u32)Memory::base));
|
||||
BSWAP(32, EAX);
|
||||
MOV(32, M(&psTemp[0]), R(RAX));
|
||||
MOV(32, R(EAX), MDisp(ECX, (u32)Memory::base + 4));
|
||||
BSWAP(32, EAX);
|
||||
MOV(32, M(((float *)&psTemp[0]) + 1), R(RAX));
|
||||
MOVQ_xmm(XMM0, M(&psTemp[0]));
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
RET();
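The non-SSSE3 path above byte-swaps both 32-bit floats with one 64-bit load-and-swap, then rotates by 32 so ps0 ends up in the low half before the MOVQ into XMM0. The same transform in plain code (sketch; uses the GCC/Clang bswap builtin for brevity):

#include <cstdint>
#include <cstring>

uint64_t load_pair_swapped(const uint8_t* src)
{
	uint64_t v;
	std::memcpy(&v, src, sizeof(v));
	v = __builtin_bswap64(v);          // LoadAndSwap(64, RCX, ...)
	return (v << 32) | (v >> 32);      // ROL(64, R(RCX), Imm8(32))
}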
|
||||
|
||||
const u8* loadPairedFloatOne = AlignCode4();
|
||||
if (cpu_info.bSSSE3) {
|
||||
#if _M_X86_64
|
||||
MOVD_xmm(XMM0, MComplex(RBX, RCX, 1, 0));
|
||||
#else
|
||||
AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK));
|
||||
MOVD_xmm(XMM0, MDisp(ECX, (u32)Memory::base));
|
||||
#endif
|
||||
PSHUFB(XMM0, M((void *)pbswapShuffle1x4));
|
||||
UNPCKLPS(XMM0, M((void*)m_one));
|
||||
} else {
|
||||
#if _M_X86_64
|
||||
LoadAndSwap(32, RCX, MComplex(RBX, RCX, 1, 0));
|
||||
MOVD_xmm(XMM0, R(RCX));
|
||||
UNPCKLPS(XMM0, M((void*)m_one));
|
||||
#else
|
||||
AND(32, R(ECX), Imm32(Memory::MEMVIEW32_MASK));
|
||||
MOV(32, R(EAX), MDisp(ECX, (u32)Memory::base));
|
||||
BSWAP(32, EAX);
|
||||
MOVD_xmm(XMM0, R(EAX));
|
||||
UNPCKLPS(XMM0, M((void*)m_one));
|
||||
#endif
|
||||
}
|
||||
RET();
|
||||
|
||||
|
|
|
@ -19,24 +19,20 @@
|
|||
|
||||
using namespace Gen;
|
||||
|
||||
#if _M_X86_64
|
||||
extern u8 *trampolineCodePtr;
|
||||
|
||||
static void BackPatchError(const std::string &text, u8 *codePtr, u32 emAddress) {
|
||||
u64 code_addr = (u64)codePtr;
|
||||
disassembler disasm;
|
||||
char disbuf[256];
|
||||
memset(disbuf, 0, 256);
|
||||
#if _M_X86_32
|
||||
disasm.disasm32(0, code_addr, codePtr, disbuf);
|
||||
#else
|
||||
disasm.disasm64(0, code_addr, codePtr, disbuf);
|
||||
#endif
|
||||
PanicAlert("%s\n\n"
|
||||
"Error encountered accessing emulated address %08x.\n"
|
||||
"Culprit instruction: \n%s\nat %#" PRIx64,
|
||||
text.c_str(), emAddress, disbuf, code_addr);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
void TrampolineCache::Init()
|
||||
{
|
||||
|
@ -55,7 +51,6 @@ const u8 *TrampolineCache::GetReadTrampoline(const InstructionInfo &info, u32 re
|
|||
PanicAlert("Trampoline cache full");
|
||||
|
||||
const u8 *trampoline = GetCodePtr();
|
||||
#if _M_X86_64
|
||||
X64Reg addrReg = (X64Reg)info.scaledReg;
|
||||
X64Reg dataReg = (X64Reg)info.regOperandReg;
|
||||
|
||||
|
@ -96,7 +91,6 @@ const u8 *TrampolineCache::GetReadTrampoline(const InstructionInfo &info, u32 re
|
|||
|
||||
ABI_PopRegistersAndAdjustStack(registersInUse, true);
|
||||
RET();
|
||||
#endif
|
||||
return trampoline;
|
||||
}
|
||||
|
||||
|
@ -108,7 +102,6 @@ const u8 *TrampolineCache::GetWriteTrampoline(const InstructionInfo &info, u32 r
|
|||
|
||||
const u8 *trampoline = GetCodePtr();
|
||||
|
||||
#if _M_X86_64
|
||||
X64Reg dataReg = (X64Reg)info.regOperandReg;
|
||||
X64Reg addrReg = (X64Reg)info.scaledReg;
|
||||
|
||||
|
@ -158,7 +151,6 @@ const u8 *TrampolineCache::GetWriteTrampoline(const InstructionInfo &info, u32 r
|
|||
|
||||
ABI_PopRegistersAndAdjustStack(registersInUse, true);
|
||||
RET();
|
||||
#endif
|
||||
|
||||
return trampoline;
|
||||
}
|
||||
|
@ -170,7 +162,6 @@ const u8 *TrampolineCache::GetWriteTrampoline(const InstructionInfo &info, u32 r
|
|||
// that many of them in a typical program/game.
|
||||
const u8 *Jitx86Base::BackPatch(u8 *codePtr, u32 emAddress, void *ctx_void)
|
||||
{
|
||||
#if _M_X86_64
|
||||
SContext *ctx = (SContext *)ctx_void;
|
||||
|
||||
if (!jit->IsInCodeSpace(codePtr))
|
||||
|
@ -271,7 +262,4 @@ const u8 *Jitx86Base::BackPatch(u8 *codePtr, u32 emAddress, void *ctx_void)
|
|||
}
|
||||
return start;
|
||||
}
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
|
|
@@ -33,16 +33,6 @@ const int BACKPATCH_SIZE = 5;
#define CTX_R14 R14
#define CTX_R15 R15
#define CTX_RIP Rip
#elif _M_X86_32
#define CTX_EAX Eax
#define CTX_EBX Ebx
#define CTX_ECX Ecx
#define CTX_EDX Edx
#define CTX_EDI Edi
#define CTX_ESI Esi
#define CTX_EBP Ebp
#define CTX_ESP Esp
#define CTX_EIP Eip
#else
#error No context definition for OS
#endif

@@ -68,17 +58,6 @@ const int BACKPATCH_SIZE = 5;
#define CTX_R14 __r14
#define CTX_R15 __r15
#define CTX_RIP __rip
#elif _M_X86_32
typedef x86_thread_state32_t SContext;
#define CTX_EAX __eax
#define CTX_EBX __ebx
#define CTX_ECX __ecx
#define CTX_EDX __edx
#define CTX_EDI __edi
#define CTX_ESI __esi
#define CTX_EBP __ebp
#define CTX_ESP __esp
#define CTX_EIP __eip
#else
#error No context definition for OS
#endif

@@ -104,32 +83,6 @@ const int BACKPATCH_SIZE = 5;
#define CTX_R14 gregs[REG_R14]
#define CTX_R15 gregs[REG_R15]
#define CTX_RIP gregs[REG_RIP]
#elif _M_X86_32
#ifdef ANDROID
#include <asm/sigcontext.h>
typedef sigcontext SContext;
#define CTX_EAX eax
#define CTX_EBX ebx
#define CTX_ECX ecx
#define CTX_EDX edx
#define CTX_EDI edi
#define CTX_ESI esi
#define CTX_EBP ebp
#define CTX_ESP esp
#define CTX_EIP eip
#else
#include <ucontext.h>
typedef mcontext_t SContext;
#define CTX_EAX gregs[REG_EAX]
#define CTX_EBX gregs[REG_EBX]
#define CTX_ECX gregs[REG_ECX]
#define CTX_EDX gregs[REG_EDX]
#define CTX_EDI gregs[REG_EDI]
#define CTX_ESI gregs[REG_ESI]
#define CTX_EBP gregs[REG_EBP]
#define CTX_ESP gregs[REG_ESP]
#define CTX_EIP gregs[REG_EIP]
#endif
#elif _M_ARM_32
// Add others if required.
typedef struct sigcontext SContext;

@@ -158,16 +111,6 @@ const int BACKPATCH_SIZE = 5;
#define CTX_R14 __gregs[_REG_R14]
#define CTX_R15 __gregs[_REG_R15]
#define CTX_RIP __gregs[_REG_RIP]
#elif _M_X86_32
#define CTX_EAX __gregs[__REG_EAX]
#define CTX_EBX __gregs[__REG_EBX]
#define CTX_ECX __gregs[__REG_ECX]
#define CTX_EDX __gregs[__REG_EDX]
#define CTX_EDI __gregs[__REG_EDI]
#define CTX_ESI __gregs[__REG_ESI]
#define CTX_EBP __gregs[__REG_EBP]
#define CTX_ESP __gregs[__REG_ESP]
#define CTX_EIP __gregs[__REG_EIP]
#else
#error No context definition for OS
#endif

@@ -192,16 +135,6 @@ const int BACKPATCH_SIZE = 5;
#define CTX_R14 mc_r14
#define CTX_R15 mc_r15
#define CTX_RIP mc_rip
#elif _M_X86_32
#define CTX_EAX mc_eax
#define CTX_EBX mc_ebx
#define CTX_ECX mc_ecx
#define CTX_EDX mc_edx
#define CTX_EDI mc_edi
#define CTX_ESI mc_esi
#define CTX_EBP mc_ebp
#define CTX_ESP mc_esp
#define CTX_EIP mc_eip
#else
#error No context definition for OS
#endif

@@ -233,8 +166,6 @@ static inline u64 *ContextRN(SContext* ctx, int n)
	};
	return (u64 *) ((char *) ctx + offsets[n]);
}
#elif _M_X86_32
#define CTX_PC CTX_EIP
#endif

class TrampolineCache : public Gen::X64CodeBlock
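The CTX_* macros above only rename each OS's thread-context fields so the backpatcher can read and rewrite the faulting instruction pointer the same way everywhere. A standalone sketch of that idea for Linux/x86-64, using glibc's REG_RIP (names outside Dolphin are illustrative, not the project's code):

#include <csignal>
#include <cstdio>
#include <ucontext.h>

static void segv_handler(int sig, siginfo_t* info, void* raw_context)
{
	ucontext_t* ctx = static_cast<ucontext_t*>(raw_context);
	// This is what CTX_RIP / CTX_PC resolve to on this platform; a JIT
	// backpatcher would rewrite the code at this address and return to retry it.
	std::fprintf(stderr, "fault at %p, RIP=%llx\n", info->si_addr,
	             (unsigned long long)ctx->uc_mcontext.gregs[REG_RIP]);
	std::signal(SIGSEGV, SIG_DFL); // fall back to the default action
}

int main()
{
	struct sigaction sa = {};
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, nullptr);
	return 0;
}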
@@ -41,12 +41,7 @@ void EmuCodeBlock::SwapAndStore(int size, const Gen::OpArg& dst, Gen::X64Reg src

void EmuCodeBlock::UnsafeLoadRegToReg(X64Reg reg_addr, X64Reg reg_value, int accessSize, s32 offset, bool signExtend)
{
#if _M_X86_64
	MOVZX(32, accessSize, reg_value, MComplex(RBX, reg_addr, SCALE_1, offset));
#else
	AND(32, R(reg_addr), Imm32(Memory::MEMVIEW32_MASK));
	MOVZX(32, accessSize, reg_value, MDisp(reg_addr, (u32)Memory::base + offset));
#endif
	if (accessSize == 32)
	{
		BSWAP(32, reg_value);

@@ -68,18 +63,12 @@ void EmuCodeBlock::UnsafeLoadRegToReg(X64Reg reg_addr, X64Reg reg_value, int acc

void EmuCodeBlock::UnsafeLoadRegToRegNoSwap(X64Reg reg_addr, X64Reg reg_value, int accessSize, s32 offset)
{
#if _M_X86_64
	MOVZX(32, accessSize, reg_value, MComplex(RBX, reg_addr, SCALE_1, offset));
#else
	AND(32, R(reg_addr), Imm32(Memory::MEMVIEW32_MASK));
	MOVZX(32, accessSize, reg_value, MDisp(reg_addr, (u32)Memory::base + offset));
#endif
}

u8 *EmuCodeBlock::UnsafeLoadToReg(X64Reg reg_value, Gen::OpArg opAddress, int accessSize, s32 offset, bool signExtend)
{
	u8 *result;
#if _M_X86_64
	if (opAddress.IsSimpleReg())
	{
		// Deal with potential wraparound. (This is just a heuristic, and it would

@@ -109,27 +98,6 @@ u8 *EmuCodeBlock::UnsafeLoadToReg(X64Reg reg_value, Gen::OpArg opAddress, int ac
		else
			MOVZX(64, accessSize, reg_value, MComplex(RBX, reg_value, SCALE_1, offset));
	}
#else
	if (opAddress.IsImm())
	{
		result = GetWritableCodePtr();
		if (accessSize == 8 && signExtend)
			MOVSX(32, accessSize, reg_value, M(Memory::base + (((u32)opAddress.offset + offset) & Memory::MEMVIEW32_MASK)));
		else
			MOVZX(32, accessSize, reg_value, M(Memory::base + (((u32)opAddress.offset + offset) & Memory::MEMVIEW32_MASK)));
	}
	else
	{
		if (!opAddress.IsSimpleReg(reg_value))
			MOV(32, R(reg_value), opAddress);
		AND(32, R(reg_value), Imm32(Memory::MEMVIEW32_MASK));
		result = GetWritableCodePtr();
		if (accessSize == 8 && signExtend)
			MOVSX(32, accessSize, reg_value, MDisp(reg_value, (u32)Memory::base + offset));
		else
			MOVZX(32, accessSize, reg_value, MDisp(reg_value, (u32)Memory::base + offset));
	}
#endif

	switch (accessSize)
	{

@@ -281,7 +249,6 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress,
	{
		registersInUse &= ~(1 << RAX | 1 << reg_value);
	}
#if _M_X86_64
	if (!Core::g_CoreStartupParameter.bMMU &&
	    Core::g_CoreStartupParameter.bFastmem &&
	    !opAddress.IsImm() &&

@@ -296,7 +263,6 @@
		registersInUseAtLoc[mov] = registersInUse;
	}
	else
#endif
	{
		u32 mem_mask = Memory::ADDR_MASK_HW_ACCESS;
		if (Core::g_CoreStartupParameter.bMMU || Core::g_CoreStartupParameter.bTLBHack)

@@ -411,7 +377,6 @@ u8 *EmuCodeBlock::UnsafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int acc
	if (accessSize == 8 && reg_value >= 4) {
		PanicAlert("WARNING: likely incorrect use of UnsafeWriteRegToReg!");
	}
#if _M_X86_64
	result = GetWritableCodePtr();
	OpArg dest = MComplex(RBX, reg_addr, SCALE_1, offset);
	if (swap)

@@ -431,15 +396,6 @@ u8 *EmuCodeBlock::UnsafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int acc
	{
		MOV(accessSize, dest, R(reg_value));
	}
#else
	if (swap)
	{
		BSWAP(accessSize, reg_value);
	}
	AND(32, R(reg_addr), Imm32(Memory::MEMVIEW32_MASK));
	result = GetWritableCodePtr();
	MOV(accessSize, MDisp(reg_addr, (u32)Memory::base + offset), R(reg_value));
#endif
	return result;
}
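The two halves of each #if above emit the same access: the removed 32-bit path masked the guest address into the mirrored view and indexed Memory::base directly, while the 64-bit path indexes off RBX. A plain C++ sketch of the runtime effect (the mask constant and byte-swap intrinsic are stand-ins, not Dolphin's definitions):

#include <cstdint>
#include <cstring>

constexpr uint32_t kMemView32Mask = 0x3FFFFFFF; // stand-in for Memory::MEMVIEW32_MASK

// Roughly what UnsafeLoadRegToReg produces at runtime: masked index, then byte swap.
inline uint32_t UnsafeLoadU32(const uint8_t* view_base, uint32_t guest_addr)
{
	uint32_t value;
	std::memcpy(&value, view_base + (guest_addr & kMemView32Mask), sizeof(value));
	return __builtin_bswap32(value); // guest memory is big-endian
}

// Roughly what UnsafeWriteRegToReg produces: byte swap, then masked store.
inline void UnsafeStoreU32(uint8_t* view_base, uint32_t guest_addr, uint32_t value)
{
	value = __builtin_bswap32(value);
	std::memcpy(view_base + (guest_addr & kMemView32Mask), &value, sizeof(value));
}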
@@ -447,7 +403,6 @@ u8 *EmuCodeBlock::UnsafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int acc
void EmuCodeBlock::SafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int accessSize, s32 offset, u32 registersInUse, int flags)
{
	registersInUse &= ~(1 << RAX);
#if _M_X86_64
	if (!Core::g_CoreStartupParameter.bMMU &&
	    Core::g_CoreStartupParameter.bFastmem &&
	    !(flags & (SAFE_LOADSTORE_NO_SWAP | SAFE_LOADSTORE_NO_FASTMEM))

@@ -468,7 +423,6 @@ void EmuCodeBlock::SafeWriteRegToReg(X64Reg reg_value, X64Reg reg_addr, int acce
		registersInUseAtLoc[mov] = registersInUse;
		return;
	}
#endif

	if (offset)
		ADD(32, R(reg_addr), Imm32((u32)offset));

@@ -517,17 +471,10 @@ void EmuCodeBlock::SafeWriteF32ToReg(X64Reg xmm_value, X64Reg reg_addr, s32 offs

void EmuCodeBlock::WriteToConstRamAddress(int accessSize, Gen::X64Reg arg, u32 address, bool swap)
{
#if _M_X86_64
	if (swap)
		SwapAndStore(accessSize, MDisp(RBX, address & 0x3FFFFFFF), arg);
	else
		MOV(accessSize, MDisp(RBX, address & 0x3FFFFFFF), R(arg));
#else
	if (swap)
		SwapAndStore(accessSize, M((void*)(Memory::base + (address & Memory::MEMVIEW32_MASK))), arg);
	else
		MOV(accessSize, M((void*)(Memory::base + (address & Memory::MEMVIEW32_MASK))), R(arg));
#endif
}

void EmuCodeBlock::ForceSinglePrecisionS(X64Reg xmm) {

@@ -553,17 +500,10 @@ static u64 GC_ALIGNED16(temp64);

static const float GC_ALIGNED16(m_zero[]) = { 0.0f, 0.0f, 0.0f, 0.0f };

#if _M_X86_64
static const __m128i GC_ALIGNED16(single_qnan_bit) = _mm_set_epi64x(0, 0x0000000000400000);
static const __m128i GC_ALIGNED16(single_exponent) = _mm_set_epi64x(0, 0x000000007f800000);
static const __m128i GC_ALIGNED16(double_qnan_bit) = _mm_set_epi64x(0, 0x0008000000000000);
static const __m128i GC_ALIGNED16(double_exponent) = _mm_set_epi64x(0, 0x7ff0000000000000);
#else
static const __m128i GC_ALIGNED16(single_qnan_bit) = _mm_set_epi32(0, 0, 0x00000000, 0x00400000);
static const __m128i GC_ALIGNED16(single_exponent) = _mm_set_epi32(0, 0, 0x00000000, 0x7f800000);
static const __m128i GC_ALIGNED16(double_qnan_bit) = _mm_set_epi32(0, 0, 0x00080000, 0x00000000);
static const __m128i GC_ALIGNED16(double_exponent) = _mm_set_epi32(0, 0, 0x7ff00000, 0x00000000);
#endif

// Since the following float conversion functions are used in non-arithmetic PPC float instructions,
// they must convert floats bitexact and never flush denormals to zero or turn SNaNs into QNaNs.

@@ -578,19 +518,11 @@ static const __m128i GC_ALIGNED16(double_exponent) = _mm_set_epi32(0, 0, 0x7ff00
//#define MORE_ACCURATE_DOUBLETOSINGLE
#ifdef MORE_ACCURATE_DOUBLETOSINGLE

#if _M_X86_64
static const __m128i GC_ALIGNED16(double_fraction) = _mm_set_epi64x(0, 0x000fffffffffffff);
static const __m128i GC_ALIGNED16(double_sign_bit) = _mm_set_epi64x(0, 0x8000000000000000);
static const __m128i GC_ALIGNED16(double_explicit_top_bit) = _mm_set_epi64x(0, 0x0010000000000000);
static const __m128i GC_ALIGNED16(double_top_two_bits) = _mm_set_epi64x(0, 0xc000000000000000);
static const __m128i GC_ALIGNED16(double_bottom_bits) = _mm_set_epi64x(0, 0x07ffffffe0000000);
#else
static const __m128i GC_ALIGNED16(double_fraction) = _mm_set_epi32(0, 0, 0x000fffff, 0xffffffff);
static const __m128i GC_ALIGNED16(double_sign_bit) = _mm_set_epi32(0, 0, 0x80000000, 0x00000000);
static const __m128i GC_ALIGNED16(double_explicit_top_bit) = _mm_set_epi32(0, 0, 0x00100000, 0x00000000);
static const __m128i GC_ALIGNED16(double_top_two_bits) = _mm_set_epi32(0, 0, 0xc0000000, 0x00000000);
static const __m128i GC_ALIGNED16(double_bottom_bits) = _mm_set_epi32(0, 0, 0x07ffffff, 0xe0000000);
#endif

// This is the same algorithm used in the interpreter (and actual hardware)
// The documentation states that the conversion of a double with an outside the
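The comment above is the reason these qnan/exponent constants exist: letting the FPU convert between double and single can set the quiet bit of a signalling NaN (and may flush denormals), so the emitter manipulates the raw bits with integer SSE instead. A small demonstration of the hazard on typical x86 hardware (this is not Dolphin's conversion routine):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
	uint32_t snan_bits = 0x7f800001; // single-precision SNaN: quiet bit 0x00400000 is clear
	float snan;
	std::memcpy(&snan, &snan_bits, sizeof(snan));

	volatile double widened = snan;               // hardware conversion quiets the NaN
	float narrowed = static_cast<float>(widened);

	uint32_t out_bits;
	std::memcpy(&out_bits, &narrowed, sizeof(out_bits));
	std::printf("in  0x%08x\nout 0x%08x\n", snan_bits, out_bits); // out has the quiet bit set
	return 0;
}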
@@ -7,43 +7,13 @@

#include <string>

#ifdef _WIN32
#define PROFILER_QUERY_PERFORMANCE_COUNTER(pt)

#if _M_X86_32
#define PROFILER_QUERY_PERFORMANCE_COUNTER(pt) \
	LEA(32, EAX, M(pt)); PUSH(EAX); \
	CALL(QueryPerformanceCounter)
// TODO: r64 way
// TODO: Implement generic ways to do this cleanly with all supported architectures
// asm write : (u64) dt += t1-t0
#define PROFILER_ADD_DIFF_LARGE_INTEGER(pdt, pt1, pt0) \
	MOV(32, R(EAX), M(pt1)); \
	SUB(32, R(EAX), M(pt0)); \
	MOV(32, R(ECX), M(((u8*)pt1) + 4)); \
	SBB(32, R(ECX), M(((u8*)pt0) + 4)); \
	ADD(32, R(EAX), M(pdt)); \
	MOV(32, R(EDX), M(((u8*)pdt) + 4)); \
	ADC(32, R(EDX), R(ECX)); \
	MOV(32, M(pdt), R(EAX)); \
	MOV(32, M(((u8*)pdt) + 4), R(EDX))

#define PROFILER_VPUSH PUSH(EAX);PUSH(ECX);PUSH(EDX)
#define PROFILER_VPOP POP(EDX);POP(ECX);POP(EAX)

#else

#define PROFILER_QUERY_PERFORMANCE_COUNTER(pt)
#define PROFILER_ADD_DIFF_LARGE_INTEGER(pdt, pt1, pt0)
#define PROFILER_VPUSH
#define PROFILER_VPOP
#endif

#else
// TODO
#define PROFILER_QUERY_PERFORMANCE_COUNTER(pt)
#define PROFILER_ADD_DIFF_LARGE_INTEGER(pdt, pt1, pt0)
#define PROFILER_VPUSH
#define PROFILER_VPOP
#endif

struct BlockStat
{
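The removed _M_X86_32 branch emitted the 64-bit accumulation by hand as paired ADD/ADC on the low and high halves. In plain C++ the macro's effect is simply the following (names are illustrative, not Dolphin's):

#include <cstdint>

struct BlockProfile
{
	uint64_t t0 = 0; // counter at block entry
	uint64_t t1 = 0; // counter at block exit
	uint64_t dt = 0; // accumulated time spent in the block
};

// Equivalent of PROFILER_ADD_DIFF_LARGE_INTEGER(pdt, pt1, pt0): dt += t1 - t0.
inline void AddDiff(BlockProfile& p)
{
	p.dt += p.t1 - p.t0;
}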
@@ -113,7 +113,6 @@ LONG NTAPI Handler(PEXCEPTION_POINTERS pPtrs)

void InstallExceptionHandler()
{
#if _M_X86_64
	// Make sure this is only called once per process execution
	// Instead, could make a Uninstall function, but whatever..
	static bool handlerInstalled = false;

@@ -122,7 +121,6 @@ void InstallExceptionHandler()

	AddVectoredExceptionHandler(TRUE, Handler);
	handlerInstalled = true;
#endif
}

#elif defined(__APPLE__)

@@ -135,7 +133,6 @@ void CheckKR(const char* name, kern_return_t kr)
	}
}

#if _M_X86_64
void ExceptionThread(mach_port_t port)
{
	Common::SetCurrentThreadName("Mach exception thread");

@@ -226,13 +223,9 @@ void ExceptionThread(mach_port_t port)
			option |= MACH_SEND_MSG;
		}
	}
#endif

void InstallExceptionHandler()
{
#if _M_X86_32
	PanicAlertT("InstallExceptionHandler called, but this platform does not yet support it.");
#else
	mach_port_t port;
	CheckKR("mach_port_allocate", mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port));
	std::thread exc_thread(ExceptionThread, port);

@@ -246,14 +239,12 @@ void InstallExceptionHandler()
	CheckKR("mach_port_mod_refs", mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, -1));
	mach_port_t previous;
	CheckKR("mach_port_request_notification", mach_port_request_notification(mach_task_self(), port, MACH_NOTIFY_NO_SENDERS, 0, port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous));
#endif
}

#elif !defined(ANDROID)

static void sigsegv_handler(int sig, siginfo_t *info, void *raw_context)
{
#ifndef _M_GENERIC
	if (sig != SIGSEGV)
	{
		// We are not interested in other signals - handle it as usual.

@@ -276,21 +267,16 @@ static void sigsegv_handler(int sig, siginfo_t *info, void *raw_context)
		// retry and crash
		signal(SIGSEGV, SIG_DFL);
	}
#endif
}

void InstallExceptionHandler()
{
#if _M_X86_32
	PanicAlertT("InstallExceptionHandler called, but this platform does not yet support it.");
#else
	struct sigaction sa;
	sa.sa_handler = nullptr;
	sa.sa_sigaction = &sigsegv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, nullptr);
#endif
}

#endif
@@ -134,11 +134,7 @@ void CJitWindow::Compare(u32 em_address)
	int num_x86_instructions = 0;
	while ((u8*)disasmPtr < end)
	{
#if _M_X86_64
		disasmPtr += x64disasm.disasm64(disasmPtr, disasmPtr, (u8*)disasmPtr, sptr);
#else
		disasmPtr += x64disasm.disasm32(disasmPtr, disasmPtr, (u8*)disasmPtr, sptr);
#endif
		sptr += strlen(sptr);
		*sptr++ = 13;
		*sptr++ = 10;

@@ -122,11 +122,9 @@ LONG WINAPI MyUnhandledExceptionFilter(LPEXCEPTION_POINTERS e) {
	//dumpCurrentDate(file);
	etfprintf(file.GetHandle(), "Unhandled Exception\n Code: 0x%08X\n",
		e->ExceptionRecord->ExceptionCode);
#if _M_X86_32
	STACKTRACE2(file.GetHandle(), e->ContextRecord->Eip, e->ContextRecord->Esp, e->ContextRecord->Ebp);
#else

	STACKTRACE2(file.GetHandle(), e->ContextRecord->Rip, e->ContextRecord->Rsp, e->ContextRecord->Rbp);
#endif

	file.Close();
	_flushall();
@@ -13,15 +13,7 @@
#define NOMINMAX // Don't include windows min/max definitions
#include <wx/wx.h> // wxWidgets

#if _M_X86_32

#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='x86' publicKeyToken='6595b64144ccf1df' language='*'\"")

#elif defined _M_IA64

#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='ia64' publicKeyToken='6595b64144ccf1df' language='*'\"")

#elif _M_X86_64
#if defined _M_X86_64

#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='amd64' publicKeyToken='6595b64144ccf1df' language='*'\"")
@@ -752,12 +752,8 @@ void VertexLoader::CompileVertexTranslator()

#ifdef USE_VERTEX_LOADER_JIT
	// End loop here
#if _M_X86_64
	MOV(64, R(RAX), Imm64((u64)&loop_counter));
	SUB(32, MatR(RAX), Imm8(1));
#else
	SUB(32, M(&loop_counter), Imm8(1));
#endif

	J_CC(CC_NZ, loop_start);
	ABI_PopAllCalleeSavedRegsAndAdjustStack();

@@ -768,12 +764,8 @@
void VertexLoader::WriteCall(TPipelineFunction func)
{
#ifdef USE_VERTEX_LOADER_JIT
#if _M_X86_64
	MOV(64, R(RAX), Imm64((u64)func));
	CALLptr(R(RAX));
#else
	CALL((void*)func);
#endif
#else
	m_PipelineStages[m_numPipelineStages++] = func;
#endif

@@ -783,24 +775,16 @@ void VertexLoader::WriteCall(TPipelineFunction func)
void VertexLoader::WriteGetVariable(int bits, OpArg dest, void *address)
{
#ifdef USE_VERTEX_LOADER_JIT
#if _M_X86_64
	MOV(64, R(RAX), Imm64((u64)address));
	MOV(bits, dest, MatR(RAX));
#else
	MOV(bits, dest, M(address));
#endif
#endif
}

void VertexLoader::WriteSetVariable(int bits, void *address, OpArg value)
{
#ifdef USE_VERTEX_LOADER_JIT
#if _M_X86_64
	MOV(64, R(RAX), Imm64((u64)address));
	MOV(bits, MatR(RAX), value);
#else
	MOV(bits, M(address), value);
#endif
#endif
}
#endif
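The pattern repeated in the functions above reflects an x86-64 encoding constraint: a general memory operand carries at most a 32-bit displacement, so a 64-bit absolute address has to be materialised in a register (RAX here) and dereferenced, whereas the removed 32-bit code could address M(address) directly. The runtime effect is just an ordinary indirect access; a trivial sketch with an illustrative global standing in for the variables the JIT touches:

#include <cstdint>

static uint32_t g_loop_counter; // illustrative stand-in

// Runtime equivalent of: MOV(64, R(RAX), Imm64((u64)address)); MOV(bits, MatR(RAX), value);
inline void WriteSetVariableEquivalent(uint32_t* address, uint32_t value)
{
	*address = value; // address held in a register, then a register-indirect store
}

// Usage: WriteSetVariableEquivalent(&g_loop_counter, 0);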
@@ -114,4 +114,4 @@
		</Lib>
	</ItemDefinitionGroup>
	<ItemGroup />
</Project>
</Project>