Revert "Merge pull request #2358 from Tilka/pie"

This reverts commit 0f7f8f8774, reversing
changes made to 9f15054358.
Scott Mansell 2015-07-06 09:05:25 +12:00
parent bd196e8c71
commit 0b0f70aad4
19 changed files with 186 additions and 119 deletions

View File

@@ -11,7 +11,6 @@ option(USE_UPNP "Enables UPnP port mapping support" ON)
 option(DISABLE_WX "Disable wxWidgets (use Qt or CLI interface)" OFF)
 option(ENABLE_QT "Enable Qt (use the experimental Qt interface)" OFF)
 option(ENABLE_PCH "Use PCH to speed up compilation" ON)
-option(ENABLE_PIE "Build a Position-Independent Executable (PIE)" ON)
 option(ENABLE_LTO "Enables Link Time Optimization" OFF)
 option(ENABLE_GENERIC "Enables generic build that should run on any little-endian host" OFF)
@@ -227,11 +226,6 @@ if(UNIX AND NOT APPLE)
   check_and_add_flag(VISIBILITY_HIDDEN -fvisibility=hidden)
 endif()
-if(ENABLE_PIE)
-  add_definitions(-fPIE)
-  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie")
-endif()
 if(ENABLE_LTO)
   check_and_add_flag(LTO -flto)
   if(CMAKE_CXX_COMPILER_ID STREQUAL GNU)
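
Background on why PIE breaks this codebase: the x86-64 JIT bakes absolute addresses of static tables into generated code as 32-bit displacements (the (u32)(u64) casts restored throughout this commit), which only works when the executable image is linked into the low 2 GB. A minimal standalone sketch of that assumption (illustrative, not Dolphin code):

#include <cassert>
#include <cstdint>
#include <cstdio>

static unsigned char lookup_table[256]; // static data in the executable image

int main()
{
    const uintptr_t addr = reinterpret_cast<uintptr_t>(lookup_table);
    // A non-PIE x86-64 binary is linked into the low 2 GB, so the address
    // fits in a signed 32-bit displacement; under PIE + ASLR it may not.
    const bool fits_in_disp32 = addr <= 0x7FFFFFFFu;
    std::printf("lookup_table at %p, fits in disp32: %s\n",
                static_cast<void*>(lookup_table), fits_in_disp32 ? "yes" : "no");
    assert(fits_in_disp32 && "disp32 embedding needs a low address");
    return 0;
}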

View File

@@ -28,10 +28,10 @@ public:
 	virtual ~CodeBlock() { if (region) FreeCodeSpace(); }
 	// Call this before you generate any code.
-	void AllocCodeSpace(int size, void* hint = nullptr)
+	void AllocCodeSpace(int size)
 	{
 		region_size = size;
-		region = (u8*)AllocateExecutableMemory(region_size, hint);
+		region = (u8*)AllocateExecutableMemory(region_size);
 		T::SetCodePtr(region);
 	}

View File

@@ -27,29 +27,71 @@
 #endif
 #endif

-void* AllocateExecutableMemory(size_t size, void* map_hint)
+// Valgrind doesn't support MAP_32BIT.
+// Uncomment the following line to be able to run Dolphin in Valgrind.
+//#undef MAP_32BIT
+
+#if !defined(_WIN32) && defined(_M_X86_64) && !defined(MAP_32BIT)
+#include <unistd.h>
+#define PAGE_MASK (getpagesize() - 1)
+#define round_page(x) ((((unsigned long)(x)) + PAGE_MASK) & ~(PAGE_MASK))
+#endif
+
+// This is purposely not a full wrapper for virtualalloc/mmap, but it
+// provides exactly the primitive operations that Dolphin needs.
+void* AllocateExecutableMemory(size_t size, bool low)
 {
 #if defined(_WIN32)
 	void* ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
 #else
+	static char *map_hint = nullptr;
+#if defined(_M_X86_64) && !defined(MAP_32BIT)
+	// This OS has no flag to enforce allocation below the 4 GB boundary,
+	// but if we hint that we want a low address it is very likely we will
+	// get one.
+	// An older version of this code used MAP_FIXED, but that has the side
+	// effect of discarding already mapped pages that happen to be in the
+	// requested virtual memory range (such as the emulated RAM, sometimes).
+	if (low && (!map_hint))
+		map_hint = (char*)round_page(512*1024*1024); /* 0.5 GB rounded up to the next page */
+#endif
 	void* ptr = mmap(map_hint, size, PROT_READ | PROT_WRITE | PROT_EXEC,
-		MAP_ANON | MAP_PRIVATE, -1, 0);
+		MAP_ANON | MAP_PRIVATE
+#if defined(_M_X86_64) && defined(MAP_32BIT)
+		| (low ? MAP_32BIT : 0)
+#endif
+		, -1, 0);
 #endif /* defined(_WIN32) */
+
+	// printf("Mapped executable memory at %p (size %ld)\n", ptr,
+	//	(unsigned long)size);
+
 #ifdef _WIN32
 	if (ptr == nullptr)
+	{
 #else
 	if (ptr == MAP_FAILED)
-#endif
 	{
 		ptr = nullptr;
-		PanicAlert("Failed to allocate executable memory.");
+#endif
+		PanicAlert("Failed to allocate executable memory. If you are running Dolphin in Valgrind, try '#undef MAP_32BIT'.");
 	}
+#if !defined(_WIN32) && defined(_M_X86_64) && !defined(MAP_32BIT)
+	else
+	{
+		if (low)
+		{
+			map_hint += size;
+			map_hint = (char*)round_page(map_hint); /* round up to the next page */
+			// printf("Next map will (hopefully) be at %p\n", map_hint);
+		}
+	}
+#endif
-#ifdef _X86_64
-	ptrdiff_t ofs = (u8*)ptr - (u8*)map_hint;
-	if (ofs < -0x80000000ll || ofs + size > 0x80000000ll)
-		PanicAlert("Executable range can't be used for RIP-relative addressing.");
+#if _M_X86_64
+	if ((u64)ptr >= 0x80000000 && low == true)
+		PanicAlert("Executable memory ended up above 2GB!");
 #endif

 	return ptr;
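
The restored allocator asks the OS for executable pages below 2 GB: MAP_32BIT where the kernel supports it, otherwise a rising low address hint. A stripped-down POSIX-only sketch of the same idea (assumptions: x86-64 Linux or macOS; AllocLowExec and the 0.5 GB starting hint are illustrative, not Dolphin's exact code, and real code would round the hint to the page size):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Allocate RWX pages, trying to keep them in the low 2 GB (sketch only).
void* AllocLowExec(size_t size)
{
#ifdef MAP_32BIT
    // Linux: the kernel places the mapping in the first 2 GB for us.
    void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_ANON | MAP_PRIVATE | MAP_32BIT, -1, 0);
#else
    // No MAP_32BIT (e.g. macOS): pass a low hint; the kernel usually honors
    // it when the range is free, but this is best-effort, not guaranteed.
    static uintptr_t hint = 512 * 1024 * 1024; // start at 0.5 GB
    void* p = mmap(reinterpret_cast<void*>(hint), size,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_ANON | MAP_PRIVATE, -1, 0);
    if (p != MAP_FAILED)
        hint = reinterpret_cast<uintptr_t>(p) + size; // next hint after this block
#endif
    return p == MAP_FAILED ? nullptr : p;
}

int main()
{
    void* p = AllocLowExec(4096);
    std::printf("executable page at %p\n", p);
}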
@@ -75,12 +117,18 @@ void* AllocateMemoryPages(size_t size)
 void* AllocateAlignedMemory(size_t size, size_t alignment)
 {
-	void* ptr = nullptr;
 #ifdef _WIN32
-	if (!(ptr = _aligned_malloc(size, alignment)))
+	void* ptr = _aligned_malloc(size, alignment);
 #else
-	if (posix_memalign(&ptr, alignment, size))
+	void* ptr = nullptr;
+	if (posix_memalign(&ptr, alignment, size) != 0)
+		ERROR_LOG(MEMMAP, "Failed to allocate aligned memory");
 #endif
+
+	// printf("Mapped memory at %p (size %ld)\n", ptr,
+	//	(unsigned long)size);
+
+	if (ptr == nullptr)
 		PanicAlert("Failed to allocate aligned memory");
+
 	return ptr;
@@ -88,12 +136,23 @@ void* AllocateAlignedMemory(size_t size, size_t alignment)
 void FreeMemoryPages(void* ptr, size_t size)
 {
+	if (ptr)
+	{
+		bool error_occurred = false;
+
 #ifdef _WIN32
-	if (ptr && !VirtualFree(ptr, 0, MEM_RELEASE))
+		if (!VirtualFree(ptr, 0, MEM_RELEASE))
+			error_occurred = true;
 #else
-	if (ptr && munmap(ptr, size))
+		int retval = munmap(ptr, size);
+		if (retval != 0)
+			error_occurred = true;
 #endif
-		PanicAlert("FreeMemoryPages failed!\n%s", GetLastErrorMsg().c_str());
+
+		if (error_occurred)
+			PanicAlert("FreeMemoryPages failed!\n%s", GetLastErrorMsg().c_str());
+	}
 }

 void FreeAlignedMemory(void* ptr)
@@ -110,34 +169,58 @@ void FreeAlignedMemory(void* ptr)
 void ReadProtectMemory(void* ptr, size_t size)
 {
+	bool error_occurred = false;
+
 #ifdef _WIN32
 	DWORD oldValue;
 	if (!VirtualProtect(ptr, size, PAGE_NOACCESS, &oldValue))
+		error_occurred = true;
 #else
-	if (mprotect(ptr, size, PROT_NONE))
+	int retval = mprotect(ptr, size, PROT_NONE);
+	if (retval != 0)
+		error_occurred = true;
 #endif
+
+	if (error_occurred)
 		PanicAlert("ReadProtectMemory failed!\n%s", GetLastErrorMsg().c_str());
 }

 void WriteProtectMemory(void* ptr, size_t size, bool allowExecute)
 {
+	bool error_occurred = false;
+
 #ifdef _WIN32
 	DWORD oldValue;
 	if (!VirtualProtect(ptr, size, allowExecute ? PAGE_EXECUTE_READ : PAGE_READONLY, &oldValue))
+		error_occurred = true;
 #else
-	if (mprotect(ptr, size, PROT_READ | (allowExecute ? PROT_EXEC : 0)))
+	int retval = mprotect(ptr, size, allowExecute ? (PROT_READ | PROT_EXEC) : PROT_READ);
+	if (retval != 0)
+		error_occurred = true;
 #endif
+
+	if (error_occurred)
 		PanicAlert("WriteProtectMemory failed!\n%s", GetLastErrorMsg().c_str());
 }

 void UnWriteProtectMemory(void* ptr, size_t size, bool allowExecute)
 {
+	bool error_occurred = false;
+
 #ifdef _WIN32
 	DWORD oldValue;
 	if (!VirtualProtect(ptr, size, allowExecute ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE, &oldValue))
+		error_occurred = true;
 #else
-	if (mprotect(ptr, size, PROT_READ | PROT_WRITE | (allowExecute ? PROT_EXEC : 0)))
+	int retval = mprotect(ptr, size, allowExecute ? (PROT_READ | PROT_WRITE | PROT_EXEC) : PROT_WRITE | PROT_READ);
+	if (retval != 0)
+		error_occurred = true;
 #endif
+
+	if (error_occurred)
 		PanicAlert("UnWriteProtectMemory failed!\n%s", GetLastErrorMsg().c_str());
 }
@@ -153,8 +236,7 @@ std::string MemUsage()
 	// Print information about the memory usage of the process.

 	hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, processID);
-	if (nullptr == hProcess)
-		return "MemUsage Error";
+	if (nullptr == hProcess) return "MemUsage Error";

 	if (GetProcessMemoryInfo(hProcess, &pmc, sizeof(pmc)))
 		Ret = StringFromFormat("%s K", ThousandSeparate(pmc.WorkingSetSize / 1024, 7).c_str());

View File

@@ -7,7 +7,7 @@
 #include <cstddef>
 #include <string>

-void* AllocateExecutableMemory(size_t size, void* map_hint);
+void* AllocateExecutableMemory(size_t size, bool low = true);
 void* AllocateMemoryPages(size_t size);
 void FreeMemoryPages(void* ptr, size_t size);
 void* AllocateAlignedMemory(size_t size,size_t alignment);

View File

@@ -218,17 +218,17 @@ inline OpArg M(const T* ptr) {return OpArg((u64)(const void*)ptr, (int)SCALE_
 inline OpArg R(X64Reg value) {return OpArg(0, SCALE_NONE, value);}
 inline OpArg MatR(X64Reg value) {return OpArg(0, SCALE_ATREG, value);}

-inline OpArg MDisp(X64Reg value, ptrdiff_t offset)
+inline OpArg MDisp(X64Reg value, int offset)
 {
-	return OpArg(offset, SCALE_ATREG, value);
+	return OpArg((u32)offset, SCALE_ATREG, value);
 }

-inline OpArg MComplex(X64Reg base, X64Reg scaled, int scale, ptrdiff_t offset)
+inline OpArg MComplex(X64Reg base, X64Reg scaled, int scale, int offset)
 {
 	return OpArg(offset, scale, base, scaled);
 }

-inline OpArg MScaled(X64Reg scaled, int scale, ptrdiff_t offset)
+inline OpArg MScaled(X64Reg scaled, int scale, int offset)
 {
 	if (scale == SCALE_1)
 		return OpArg(offset, SCALE_ATREG, scaled);
@@ -247,10 +247,17 @@ inline OpArg Imm32(u32 imm) {return OpArg(imm, SCALE_IMM32);}
 inline OpArg Imm64(u64 imm) {return OpArg(imm, SCALE_IMM64);}
 inline OpArg ImmPtr(const void* imm) {return Imm64((u64)imm);}

-inline bool FitsInS32(const ptrdiff_t distance)
+inline u32 PtrOffset(const void* ptr, const void* base)
 {
-	return distance < 0x80000000LL &&
-	       distance >= -0x80000000LL;
+	s64 distance = (s64)ptr-(s64)base;
+	if (distance >= 0x80000000LL ||
+	    distance < -0x80000000LL)
+	{
+		_assert_msg_(DYNA_REC, 0, "pointer offset out of range");
+		return 0;
+	}
+	return (u32)distance;
 }

 //usage: int a[]; ARRAY_OFFSET(a,10)
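
PtrOffset replaces the removed FitsInS32 helper: rather than asking whether a pivot-relative offset fits, callers now take a raw pointer difference and assert it fits in a signed 32-bit displacement. A standalone sketch of the same check (assert stands in for _assert_msg_):

#include <cassert>
#include <cstdint>

// Sketch of the restored PtrOffset logic: a pointer difference that must
// fit in a signed 32-bit displacement.
uint32_t PtrOffsetSketch(const void* ptr, const void* base)
{
    const int64_t distance = reinterpret_cast<int64_t>(ptr) -
                             reinterpret_cast<int64_t>(base);
    assert(distance < 0x80000000LL && distance >= -0x80000000LL &&
           "pointer offset out of range");
    return static_cast<uint32_t>(distance);
}

int main()
{
    static int table[16];
    // The offset of an element relative to the table start always fits.
    uint32_t off = PtrOffsetSketch(&table[10], &table[0]);
    return off == 10 * sizeof(int) ? 0 : 1;
}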

View File

@@ -20,16 +20,14 @@ void DSPEmitter::dsp_reg_stack_push(int stack_reg)
 	AND(8, R(AL), Imm8(DSP_STACK_MASK));
 	MOV(8, M(&g_dsp.reg_stack_ptr[stack_reg]), R(AL));

-	X64Reg tmp1, tmp2;
+	X64Reg tmp1;
 	gpr.getFreeXReg(tmp1);
-	gpr.getFreeXReg(tmp2);

 	//g_dsp.reg_stack[stack_reg][g_dsp.reg_stack_ptr[stack_reg]] = g_dsp.r[DSP_REG_ST0 + stack_reg];
 	MOV(16, R(tmp1), M(&g_dsp.r.st[stack_reg]));
 	MOVZX(64, 8, RAX, R(AL));
-	MOV(64, R(tmp2), ImmPtr(g_dsp.reg_stack[stack_reg]));
-	MOV(16, MComplex(tmp2, EAX, SCALE_2, 0), R(tmp1));
+	MOV(16, MComplex(EAX, EAX, 1,
+		PtrOffset(&g_dsp.reg_stack[stack_reg][0],nullptr)), R(tmp1));
 	gpr.putXReg(tmp1);
-	gpr.putXReg(tmp2);
 }

 //clobbers:
@@ -39,15 +37,13 @@ void DSPEmitter::dsp_reg_stack_pop(int stack_reg)
 {
 	//g_dsp.r[DSP_REG_ST0 + stack_reg] = g_dsp.reg_stack[stack_reg][g_dsp.reg_stack_ptr[stack_reg]];
 	MOV(8, R(AL), M(&g_dsp.reg_stack_ptr[stack_reg]));

-	X64Reg tmp1, tmp2;
+	X64Reg tmp1;
 	gpr.getFreeXReg(tmp1);
-	gpr.getFreeXReg(tmp2);

 	MOVZX(64, 8, RAX, R(AL));
-	MOV(64, R(tmp2), ImmPtr(g_dsp.reg_stack[stack_reg]));
-	MOV(16, R(tmp1), MComplex(tmp2, EAX, SCALE_2, 0));
+	MOV(16, R(tmp1), MComplex(EAX, EAX, 1,
+		PtrOffset(&g_dsp.reg_stack[stack_reg][0],nullptr)));
 	MOV(16, M(&g_dsp.r.st[stack_reg]), R(tmp1));
 	gpr.putXReg(tmp1);
-	gpr.putXReg(tmp2);

 	//g_dsp.reg_stack_ptr[stack_reg]--;
 	//g_dsp.reg_stack_ptr[stack_reg] &= DSP_STACK_MASK;
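
The restored DSP code frees a second temp register by exploiting MComplex(EAX, EAX, 1, disp): base EAX plus index EAX*1 equals 2*EAX, so a u16 table element at table + 2*index is reachable with no register holding the table base, provided the base fits in a 32-bit displacement (which PtrOffset(..., nullptr) asserts). The address arithmetic in plain C++ (illustrative, not Dolphin code):

#include <cstdint>
#include <cstdio>

int main()
{
    static uint16_t reg_stack[32];            // stands in for g_dsp.reg_stack[n]
    const uint64_t index = 5;                 // zero-extended stack pointer (RAX)
    const uint64_t base = reinterpret_cast<uint64_t>(&reg_stack[0]);

    // MComplex(EAX, EAX, 1, base) encodes: address = EAX + EAX*1 + base,
    // i.e. base + 2*index -- exactly u16 element 'index' of the table.
    const uint64_t addr = index + index * 1 + base;
    std::printf("%d\n", addr == reinterpret_cast<uint64_t>(&reg_stack[index]));
}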

View File

@@ -40,7 +40,7 @@ void Jit64AsmRoutineManager::Generate()
 	// Two statically allocated registers.
 	//MOV(64, R(RMEM), Imm64((u64)Memory::physical_base));
-	MOV(64, R(RPPCSTATE), ImmPtr(PPCSTATE_BASE));
+	MOV(64, R(RPPCSTATE), Imm64((u64)&PowerPC::ppcState + 0x80));

 	const u8* outerLoop = GetCodePtr();
 	ABI_PushRegistersAndAdjustStack({}, 0);
@@ -103,9 +103,9 @@ void Jit64AsmRoutineManager::Generate()
 			// optimizations safe, because IR and DR are usually set/cleared together.
 			// TODO: Branching based on the 20 most significant bits of instruction
 			// addresses without translating them is wrong.
-			u8* icache = jit->GetBlockCache()->iCache.data();
-			u8* icacheVmem = jit->GetBlockCache()->iCacheVMEM.data();
-			u8* icacheEx = jit->GetBlockCache()->iCacheEx.data();
+			u64 icache = (u64)jit->GetBlockCache()->iCache.data();
+			u64 icacheVmem = (u64)jit->GetBlockCache()->iCacheVMEM.data();
+			u64 icacheEx = (u64)jit->GetBlockCache()->iCacheEx.data();
 			u32 mask = 0;
 			FixupBranch no_mem;
 			FixupBranch exit_mem;
@@ -117,13 +117,13 @@ void Jit64AsmRoutineManager::Generate()
 			no_mem = J_CC(CC_NZ);
 			AND(32, R(RSCRATCH), Imm32(JIT_ICACHE_MASK));
-			if (FitsInS32(PPCSTATE_OFS(icache)))
+			if (icache <= INT_MAX)
 			{
-				MOV(32, R(RSCRATCH), MPIC(icache, RSCRATCH));
+				MOV(32, R(RSCRATCH), MDisp(RSCRATCH, (s32)icache));
 			}
 			else
 			{
-				MOV(64, R(RSCRATCH2), ImmPtr(icache));
+				MOV(64, R(RSCRATCH2), Imm64(icache));
 				MOV(32, R(RSCRATCH), MRegSum(RSCRATCH2, RSCRATCH));
 			}
@@ -132,14 +132,13 @@ void Jit64AsmRoutineManager::Generate()
 			TEST(32, R(RSCRATCH), Imm32(JIT_ICACHE_VMEM_BIT));
 			FixupBranch no_vmem = J_CC(CC_Z);
 			AND(32, R(RSCRATCH), Imm32(JIT_ICACHE_MASK));
-			if (FitsInS32(PPCSTATE_OFS(icacheVmem)))
+			if (icacheVmem <= INT_MAX)
 			{
-				MOV(32, R(RSCRATCH), MPIC(icacheVmem, RSCRATCH));
+				MOV(32, R(RSCRATCH), MDisp(RSCRATCH, (s32)icacheVmem));
 			}
 			else
 			{
-				MOV(64, R(RSCRATCH2), ImmPtr(icacheVmem));
+				MOV(64, R(RSCRATCH2), Imm64(icacheVmem));
 				MOV(32, R(RSCRATCH), MRegSum(RSCRATCH2, RSCRATCH));
 			}
@@ -150,13 +149,14 @@ void Jit64AsmRoutineManager::Generate()
 			TEST(32, R(RSCRATCH), Imm32(JIT_ICACHE_EXRAM_BIT));
 			FixupBranch no_exram = J_CC(CC_Z);
 			AND(32, R(RSCRATCH), Imm32(JIT_ICACHEEX_MASK));
-			if (FitsInS32(PPCSTATE_OFS(icacheEx)))
+			if (icacheEx <= INT_MAX)
 			{
-				MOV(32, R(RSCRATCH), MPIC(icacheEx, RSCRATCH));
+				MOV(32, R(RSCRATCH), MDisp(RSCRATCH, (s32)icacheEx));
 			}
 			else
 			{
-				MOV(64, R(RSCRATCH2), ImmPtr(icacheEx));
+				MOV(64, R(RSCRATCH2), Imm64(icacheEx));
 				MOV(32, R(RSCRATCH), MRegSum(RSCRATCH2, RSCRATCH));
 			}
@@ -169,17 +169,16 @@ void Jit64AsmRoutineManager::Generate()
 			TEST(32, R(RSCRATCH), R(RSCRATCH));
 			FixupBranch notfound = J_CC(CC_L);
 			//grab from list and jump to it
-			const u8** codePointers = jit->GetBlockCache()->GetCodePointers();
-			if (FitsInS32(PPCSTATE_OFS(codePointers)))
+			u64 codePointers = (u64)jit->GetBlockCache()->GetCodePointers();
+			if (codePointers <= INT_MAX)
 			{
-				JMPptr(MPIC(codePointers, RSCRATCH, SCALE_8));
+				JMPptr(MScaled(RSCRATCH, SCALE_8, (s32)codePointers));
 			}
 			else
 			{
-				MOV(64, R(RSCRATCH2), ImmPtr(codePointers));
+				MOV(64, R(RSCRATCH2), Imm64(codePointers));
 				JMPptr(MComplex(RSCRATCH2, RSCRATCH, SCALE_8, 0));
 			}
 			SetJumpTarget(notfound);

 			//Ok, no block, let's jit
@@ -272,7 +271,7 @@ void Jit64AsmRoutineManager::GenerateCommon()
 	CMP(32, R(ABI_PARAM2), Imm32(0xCC008000));
 	FixupBranch skip_fast_write = J_CC(CC_NE, false);
 	MOV(32, RSCRATCH, M(&m_gatherPipeCount));
-	MOV(8, MPIC(&m_gatherPipe, RSCRATCH), ABI_PARAM1);
+	MOV(8, MDisp(RSCRATCH, (u32)&m_gatherPipe), ABI_PARAM1);
 	ADD(32, 1, M(&m_gatherPipeCount));
 	RET();
 	SetJumpTarget(skip_fast_write);
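
Every dispatch site restored above follows one pattern: if the table's absolute address fits in a signed 32-bit displacement, encode it inline (MDisp/MScaled); otherwise load the full 64-bit address into a scratch register first (Imm64 + MRegSum/MComplex). The shape of that decision, as a sketch (the printed mnemonics are illustrative, not real Gen:: emitter calls):

#include <climits>
#include <cstdint>
#include <cstdio>

// Sketch of the restored encoding choice for a table load.
void EmitTableLoad(uint64_t table)
{
    if (table <= INT_MAX)
    {
        // Short form: one instruction, table address as a disp32.
        std::printf("mov r, [rscratch + 0x%llx]\n", (unsigned long long)table);
    }
    else
    {
        // Long form: 64-bit immediate into a second scratch reg, then add.
        std::printf("mov rscratch2, 0x%llx\n", (unsigned long long)table);
        std::printf("mov r, [rscratch2 + rscratch]\n");
    }
}

int main()
{
    EmitTableLoad(0x10000);       // low address: short form
    EmitTableLoad(0x700000000);   // high address: long form
}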

View File

@@ -34,7 +34,7 @@ public:
 		m_stack_top = stack_top;
 		// NOTE: When making large additions to the AsmCommon code, you might
 		// want to ensure this number is big enough.
-		AllocCodeSpace(16384, PPCSTATE_BASE);
+		AllocCodeSpace(16384);
 		Generate();
 		WriteProtect();
 	}

View File

@@ -45,7 +45,7 @@ void Jit64::GenerateOverflow()
 	//rare).
 	static const u8 ovtable[4] = {0, 0, XER_SO_MASK, XER_SO_MASK};
 	MOVZX(32, 8, RSCRATCH, PPCSTATE(xer_so_ov));
-	MOV(8, R(RSCRATCH), MPIC(ovtable, RSCRATCH));
+	MOV(8, R(RSCRATCH), MDisp(RSCRATCH, (u32)(u64)ovtable));
 	MOV(8, PPCSTATE(xer_so_ov), R(RSCRATCH));
 	SetJumpTarget(exit);
 }

View File

@@ -132,13 +132,13 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
 	{
 		// One value
 		CVTSD2SS(XMM0, fpr.R(s));
-		CALLptr(MPIC(asm_routines.singleStoreQuantized, RSCRATCH, SCALE_8));
+		CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)asm_routines.singleStoreQuantized));
 	}
 	else
 	{
 		// Pair of values
 		CVTPD2PS(XMM0, fpr.R(s));
-		CALLptr(MPIC(asm_routines.pairedStoreQuantized, RSCRATCH, SCALE_8));
+		CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)asm_routines.pairedStoreQuantized));
 	}

 	if (update && jo.memcheck)
@@ -306,7 +306,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst)
 	AND(32, R(RSCRATCH2), gqr);
 	MOVZX(32, 8, RSCRATCH, R(RSCRATCH2));

-	CALLptr(MPIC(&asm_routines.pairedLoadQuantized[w * 8], RSCRATCH, SCALE_8));
+	CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)(&asm_routines.pairedLoadQuantized[w * 8])));

 	MemoryExceptionCheck();
 	CVTPS2PD(fpr.RX(s), R(XMM0));
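
The quantized load/store helpers are reached through a computed call: CALLptr(MScaled(RSCRATCH, SCALE_8, table)) indexes an 8-byte-stride table of routine entry points by the type extracted from the GQR register. The equivalent control flow in plain C++ (names are illustrative stand-ins):

#include <cstdio>

// Illustrative stand-ins for the per-type store routines.
static void StoreFloat() { std::puts("store float pair"); }
static void StoreU8()    { std::puts("store u8 pair"); }
static void StoreS16()   { std::puts("store s16 pair"); }

// 8-byte-stride table of routine pointers, like pairedStoreQuantized.
static void (*const pairedStore[])() = { StoreFloat, StoreU8, StoreS16 };

int main()
{
    int type = 1; // in the real JIT, extracted from the GQR register
    // CALLptr(MScaled(RSCRATCH, SCALE_8, table)) == pairedStore[type]()
    pairedStore[type]();
}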

View File

@@ -458,7 +458,7 @@ void Jit64::mtcrf(UGeckoInstruction inst)
 					SHR(32, R(RSCRATCH), Imm8(28 - (i * 4)));
 					if (i != 0)
 						AND(32, R(RSCRATCH), Imm8(0xF));
-					MOV(64, R(RSCRATCH), MPIC(m_crTable, RSCRATCH, SCALE_8));
+					MOV(64, R(RSCRATCH), MScaled(RSCRATCH, SCALE_8, (u32)(u64)m_crTable));
 					MOV(64, PPCSTATE(cr_val[i]), R(RSCRATCH));
 				}
 			}
@@ -493,7 +493,7 @@ void Jit64::mcrxr(UGeckoInstruction inst)
 	// [SO OV CA 0] << 3
 	SHL(32, R(RSCRATCH), Imm8(4));

-	MOV(64, R(RSCRATCH), MPIC(m_crTable, RSCRATCH));
+	MOV(64, R(RSCRATCH), MDisp(RSCRATCH, (u32)(u64)m_crTable));
 	MOV(64, PPCSTATE(cr_val[inst.CRFD]), R(RSCRATCH));

 	// Clear XER[0-3]

View File

@@ -24,8 +24,10 @@ void CommonAsmRoutines::GenFifoWrite(int size)
 	const void* start = GetCodePtr();

 	// Assume value in RSCRATCH
+	u32 gather_pipe = (u32)(u64)GPFifo::m_gatherPipe;
+	_assert_msg_(DYNA_REC, gather_pipe <= 0x7FFFFFFF, "Gather pipe not in low 2GB of memory!");
 	MOV(32, R(RSCRATCH2), M(&GPFifo::m_gatherPipeCount));
-	SwapAndStore(size, MPIC(GPFifo::m_gatherPipe, RSCRATCH2), RSCRATCH);
+	SwapAndStore(size, MDisp(RSCRATCH2, gather_pipe), RSCRATCH);
 	ADD(32, R(RSCRATCH2), Imm8(size >> 3));
 	MOV(32, M(&GPFifo::m_gatherPipeCount), R(RSCRATCH2));
 	RET();
@@ -66,8 +68,8 @@ void CommonAsmRoutines::GenFrsqrte()
 	SHR(64, R(RSCRATCH), Imm8(37));
 	AND(32, R(RSCRATCH), Imm32(0x7FF));
-	IMUL(32, RSCRATCH, MPIC(MathUtil::frsqrte_expected_dec, RSCRATCH_EXTRA, SCALE_4));
-	MOV(32, R(RSCRATCH_EXTRA), MPIC(MathUtil::frsqrte_expected_base, RSCRATCH_EXTRA, SCALE_4));
+	IMUL(32, RSCRATCH, MScaled(RSCRATCH_EXTRA, SCALE_4, (u32)(u64)MathUtil::frsqrte_expected_dec));
+	MOV(32, R(RSCRATCH_EXTRA), MScaled(RSCRATCH_EXTRA, SCALE_4, (u32)(u64)MathUtil::frsqrte_expected_base));
 	SUB(32, R(RSCRATCH_EXTRA), R(RSCRATCH));
 	SHL(64, R(RSCRATCH_EXTRA), Imm8(26));
 	OR(64, R(RSCRATCH2), R(RSCRATCH_EXTRA)); // vali |= (s64)(frsqrte_expected_base[index] - frsqrte_expected_dec[index] * (i % 2048)) << 26;
@@ -134,11 +136,11 @@ void CommonAsmRoutines::GenFres()
 	AND(32, R(RSCRATCH), Imm32(0x3FF)); // i % 1024
 	AND(32, R(RSCRATCH2), Imm8(0x1F)); // i / 1024
-	IMUL(32, RSCRATCH, MPIC(MathUtil::fres_expected_dec, RSCRATCH2, SCALE_4));
+	IMUL(32, RSCRATCH, MScaled(RSCRATCH2, SCALE_4, (u32)(u64)MathUtil::fres_expected_dec));
 	ADD(32, R(RSCRATCH), Imm8(1));
 	SHR(32, R(RSCRATCH), Imm8(1));
-	MOV(32, R(RSCRATCH2), MPIC(MathUtil::fres_expected_base, RSCRATCH2, SCALE_4));
+	MOV(32, R(RSCRATCH2), MScaled(RSCRATCH2, SCALE_4, (u32)(u64)MathUtil::fres_expected_base));
 	SUB(32, R(RSCRATCH2), R(RSCRATCH));
 	SHL(64, R(RSCRATCH2), Imm8(29));
 	OR(64, R(RSCRATCH2), R(RSCRATCH_EXTRA)); // vali |= (s64)(fres_expected_base[i / 1024] - (fres_expected_dec[i / 1024] * (i % 1024) + 1) / 2) << 29
@@ -197,7 +199,7 @@ void CommonAsmRoutines::GenMfcr()
 		// SO: Bit 61 set; set flag bit 0
 		// LT: Bit 62 set; set flag bit 3
 		SHR(64, R(cr_val), Imm8(61));
-		OR(32, R(dst), MPIC(m_flagTable, cr_val, SCALE_4));
+		OR(32, R(dst), MScaled(cr_val, SCALE_4, (u32)(u64)m_flagTable));
 	}
 	RET();
@@ -245,7 +247,7 @@ void CommonAsmRoutines::GenQuantizedStores()
 	const u8* storePairedU8 = AlignCode4();
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_quantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	MULPS(XMM0, R(XMM1));
 #ifdef QUANTIZE_OVERFLOW_SAFE
 	MINPS(XMM0, M(m_65535));
@@ -260,7 +262,7 @@ void CommonAsmRoutines::GenQuantizedStores()
 	const u8* storePairedS8 = AlignCode4();
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_quantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	MULPS(XMM0, R(XMM1));
 #ifdef QUANTIZE_OVERFLOW_SAFE
 	MINPS(XMM0, M(m_65535));
@@ -276,7 +278,7 @@ void CommonAsmRoutines::GenQuantizedStores()
 	const u8* storePairedU16 = AlignCode4();
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_quantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	MULPS(XMM0, R(XMM1));

 	if (cpu_info.bSSE4_1)
@@ -308,7 +310,7 @@ void CommonAsmRoutines::GenQuantizedStores()
 	const u8* storePairedS16 = AlignCode4();
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_quantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	MULPS(XMM0, R(XMM1));
 #ifdef QUANTIZE_OVERFLOW_SAFE
 	MINPS(XMM0, M(m_65535));
@@ -353,7 +355,7 @@ void CommonAsmRoutines::GenQuantizedSingleStores()
 	const u8* storeSingleU8 = AlignCode4();  // Used by MKWii
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_quantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	XORPS(XMM1, R(XMM1));
 	MAXSS(XMM0, R(XMM1));
 	MINSS(XMM0, M(&m_255));
@@ -363,7 +365,7 @@ void CommonAsmRoutines::GenQuantizedSingleStores()
 	const u8* storeSingleS8 = AlignCode4();
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_quantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	MAXSS(XMM0, M(&m_m128));
 	MINSS(XMM0, M(&m_127));
 	CVTTSS2SI(RSCRATCH, R(XMM0));
@@ -372,7 +374,7 @@ void CommonAsmRoutines::GenQuantizedSingleStores()
 	const u8* storeSingleU16 = AlignCode4();  // Used by MKWii
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_quantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	XORPS(XMM1, R(XMM1));
 	MAXSS(XMM0, R(XMM1));
 	MINSS(XMM0, M(m_65535));
@@ -382,7 +384,7 @@ void CommonAsmRoutines::GenQuantizedSingleStores()
 	const u8* storeSingleS16 = AlignCode4();
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_quantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
 	MAXSS(XMM0, M(&m_m32768));
 	MINSS(XMM0, M(&m_32767));
 	CVTTSS2SI(RSCRATCH, R(XMM0));
@@ -482,7 +484,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	}
 	CVTDQ2PS(XMM0, R(XMM0));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	MULPS(XMM0, R(XMM1));
 	RET();
@@ -493,7 +495,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	UnsafeLoadRegToRegNoSwap(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 8, 0); // RSCRATCH_EXTRA = 0x000000xx
 	CVTSI2SS(XMM0, R(RSCRATCH_EXTRA));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	UNPCKLPS(XMM0, M(m_one));
 	RET();
@@ -521,7 +523,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	}
 	CVTDQ2PS(XMM0, R(XMM0));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	MULPS(XMM0, R(XMM1));
 	RET();
@@ -532,7 +534,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	UnsafeLoadRegToRegNoSwap(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 8, 0, true);
 	CVTSI2SS(XMM0, R(RSCRATCH_EXTRA));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	UNPCKLPS(XMM0, M(m_one));
 	RET();
@@ -555,7 +557,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	}
 	CVTDQ2PS(XMM0, R(XMM0));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	MULPS(XMM0, R(XMM1));
 	RET();
@@ -566,7 +568,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	UnsafeLoadRegToReg(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 16, 0, false);
 	CVTSI2SS(XMM0, R(RSCRATCH_EXTRA));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	UNPCKLPS(XMM0, M(m_one));
 	RET();
@@ -588,7 +590,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	}
 	CVTDQ2PS(XMM0, R(XMM0));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MOVQ_xmm(XMM1, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	MULPS(XMM0, R(XMM1));
 	RET();
@@ -599,7 +601,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 	UnsafeLoadRegToReg(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 16, 0, true);
 	CVTSI2SS(XMM0, R(RSCRATCH_EXTRA));
 	SHR(32, R(RSCRATCH2), Imm8(5));
-	MULSS(XMM0, MPIC(m_dequantizeTableS, RSCRATCH2));
+	MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
 	UNPCKLPS(XMM0, M(m_one));
 	RET();

View File

@@ -1611,7 +1611,7 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress)
 			Jit->OR(32, R(RSCRATCH), Imm8(w << 3));
 			Jit->MOV(32, R(RSCRATCH_EXTRA), regLocForInst(RI, getOp1(I)));
-			Jit->CALLptr(MPIC(Jit->asm_routines.pairedLoadQuantized, RSCRATCH, SCALE_8));
+			Jit->CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)(Jit->asm_routines.pairedLoadQuantized)));
 			Jit->MOVAPD(reg, R(XMM0));
 			RI.fregs[reg] = I;
 			regNormalRegClear(RI, I);
@@ -1669,7 +1669,7 @@ static void DoWriteCode(IRBuilder* ibuild, JitIL* Jit, u32 exitAddress)
 			Jit->MOV(32, R(RSCRATCH_EXTRA), regLocForInst(RI, getOp2(I)));
 			Jit->MOVAPD(XMM0, fregLocForInst(RI, getOp1(I)));
-			Jit->CALLptr(MPIC(Jit->asm_routines.pairedStoreQuantized, RSCRATCH, SCALE_8));
+			Jit->CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)(Jit->asm_routines.pairedStoreQuantized)));
 			if (RI.IInfo[I - RI.FirstI] & 4)
 				fregClearInst(RI, getOp1(I));
 			if (RI.IInfo[I - RI.FirstI] & 8)

View File

@@ -250,7 +250,7 @@ void JitIL::Init()
 	UpdateMemoryOptions();

 	trampolines.Init(jo.memcheck ? TRAMPOLINE_CODE_SIZE_MMU : TRAMPOLINE_CODE_SIZE);
-	AllocCodeSpace(CODE_SIZE, PPCSTATE_BASE);
+	AllocCodeSpace(CODE_SIZE);
 	blocks.Init();
 	asm_routines.Init(nullptr);

View File

@@ -43,18 +43,6 @@
 // to address as much as possible in a one-byte offset form.
 #define RPPCSTATE RBP

-namespace Gen
-{
-inline OpArg MPIC(const void* address, X64Reg scale_reg, int scale = SCALE_1)
-{
-	ptrdiff_t offset = PPCSTATE_OFS(address);
-	_dbg_assert_(DYNA_REC, FitsInS32(offset));
-	return MComplex(RPPCSTATE, scale_reg, scale, offset);
-}
-}
-
 // Use these to control the instruction selection
 // #define INSTRUCTION_START FallBackToInterpreter(inst); return;
 // #define INSTRUCTION_START PPCTables::CountInstruction(inst);
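
The removed MPIC helper was the PIE-safe counterpart of the patterns above: instead of embedding an absolute address, it addressed every static table relative to RPPCSTATE (which the dispatcher keeps pointed at ppcState + 0x80), asserting that the offset fits in 32 bits. A sketch of the address arithmetic it relied on (stand-in types, not Dolphin code):

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint8_t ppcState[1024];   // stand-in for PowerPC::ppcState
static uint32_t crTable[16];     // some static table the JIT reads

int main()
{
    // RPPCSTATE always holds this pivot value in generated code.
    const uint8_t* pivot = ppcState + 0x80;
    // MPIC(table, reg, scale) encoded: [RPPCSTATE + reg*scale + offset]
    const intptr_t offset = reinterpret_cast<const uint8_t*>(crTable) - pivot;
    assert(offset >= INT32_MIN && offset <= INT32_MAX); // the _dbg_assert_ above
    const unsigned index = 3;
    const uint32_t* addr =
        reinterpret_cast<const uint32_t*>(pivot + index * 4 + offset);
    std::printf("%d\n", addr == &crTable[index]);
}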

View File

@@ -173,11 +173,11 @@ private:
 		u32 all_ones = (1ULL << sbits) - 1;
 		if ((all_ones & mask) == all_ones)
 		{
-			MoveOpArgToReg(sbits, MatR(RSCRATCH));
+			MoveOpArgToReg(sbits, MDisp(RSCRATCH, 0));
 		}
 		else
 		{
-			m_code->MOVZX(32, sbits, m_dst_reg, MatR(RSCRATCH));
+			m_code->MOVZX(32, sbits, m_dst_reg, MDisp(RSCRATCH, 0));
 			m_code->AND(32, R(m_dst_reg), Imm32(mask));
 			if (m_sign_extend)
 				m_code->MOVSX(32, sbits, m_dst_reg, R(m_dst_reg));

View File

@@ -9,15 +9,15 @@
 #include "Common/BitSet.h"
 #include "Common/CPUDetect.h"
 #include "Common/x64Emitter.h"
-#include "Core/PowerPC/PowerPC.h"

 namespace MMIO { class Mapping; }

 // We offset by 0x80 because the range of one byte memory offsets is
 // -0x80..0x7f.
-#define PPCSTATE_BASE ((u8*)&PowerPC::ppcState + 0x80)
-#define PPCSTATE_OFS(x) ((u8*)(x) - PPCSTATE_BASE)
-#define PPCSTATE(x) MDisp(RPPCSTATE, PPCSTATE_OFS(&PowerPC::ppcState.x))
+#define PPCSTATE(x) MDisp(RPPCSTATE, \
+	(int) ((char *) &PowerPC::ppcState.x - (char *) &PowerPC::ppcState) - 0x80)
+// In case you want to disable the ppcstate register:
+// #define PPCSTATE(x) M(&PowerPC::ppcState.x)

 #define PPCSTATE_LR PPCSTATE(spr[SPR_LR])
 #define PPCSTATE_CTR PPCSTATE(spr[SPR_CTR])
 #define PPCSTATE_SRR0 PPCSTATE(spr[SPR_SRR0])
@@ -31,7 +31,7 @@ private:
 	bool m_enabled = false;
 public:
 	bool Enabled() { return m_enabled; }
-	void Init(int size) { AllocCodeSpace(size, PPCSTATE_BASE); m_enabled = true; }
+	void Init(int size) { AllocCodeSpace(size); m_enabled = true; }
 	void Shutdown() { FreeCodeSpace(); m_enabled = false; }
 };
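
The restored PPCSTATE(x) macro biases each field offset by -0x80 because RPPCSTATE points 0x80 bytes into ppcState; fields in the first 0x100 bytes then fall in the signed disp8 range -0x80..0x7f and encode one byte shorter. The arithmetic, checked in plain C++ (the struct layout is a stand-in, not the real ppcState):

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct PpcStateSketch              // stand-in layout, not the real ppcState
{
    uint32_t pc;                   // offset 0
    uint32_t gpr[32];              // offsets 4..0x84
};

int main()
{
    PpcStateSketch st{};
    const char* base  = reinterpret_cast<const char*>(&st);
    const char* pivot = base + 0x80;   // what RPPCSTATE holds

    // PPCSTATE(x): disp = offsetof(x) - 0x80, relative to the pivot.
    const int disp_pc = static_cast<int>(offsetof(PpcStateSketch, pc)) - 0x80;
    std::printf("pc disp = %d (fits in disp8: %s)\n",
                disp_pc, (disp_pc >= -0x80 && disp_pc <= 0x7f) ? "yes" : "no");
    // pivot + disp recovers the field address:
    std::printf("%d\n", pivot + disp_pc == reinterpret_cast<const char*>(&st.pc));
}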

View File

@@ -22,7 +22,7 @@ using namespace Gen;

 void TrampolineCache::Init(int size)
 {
-	AllocCodeSpace(size, PPCSTATE_BASE);
+	AllocCodeSpace(size);
 }

 void TrampolineCache::ClearCodeSpace()

View File

@@ -19,7 +19,6 @@
 #include "Common/CPUDetect.h"
 #include "Common/x64Emitter.h"
-#include "Core/PowerPC/JitCommon/Jit_Util.h"

 namespace Gen
 {
@@ -95,7 +94,7 @@ protected:
 		memset(&cpu_info, 0xFF, sizeof (cpu_info));

 		emitter.reset(new X64CodeBlock());
-		emitter->AllocCodeSpace(4096, PPCSTATE_BASE);
+		emitter->AllocCodeSpace(4096);
 		code_buffer = emitter->GetWritableCodePtr();

 		disasm.reset(new disassembler);