diff --git a/Source/Core/Core/ArmMemTools.cpp b/Source/Core/Core/ArmMemTools.cpp
index c9816aba00..8b166580a8 100644
--- a/Source/Core/Core/ArmMemTools.cpp
+++ b/Source/Core/Core/ArmMemTools.cpp
@@ -32,9 +32,9 @@ typedef struct ucontext {
} ucontext_t;
#endif
-void sigsegv_handler(int signal, siginfo_t *info, void *raw_context)
+static void sigsegv_handler(int sig, siginfo_t *info, void *raw_context)
{
- if (signal != SIGSEGV)
+ if (sig != SIGSEGV)
{
// We are not interested in other signals - handle it as usual.
return;
@@ -47,33 +47,18 @@ void sigsegv_handler(int signal, siginfo_t *info, void *raw_context)
return;
}
-
// Get all the information we can out of the context.
mcontext_t *ctx = &context->uc_mcontext;
- void *fault_memory_ptr = (void*)ctx->arm_r10;
- u8 *fault_instruction_ptr = (u8 *)ctx->arm_pc;
+ // comex says hello, and is most curious whether this is arm_r10 for a
+ // reason as opposed to si_addr like the x64MemTools.cpp version. Is there
+ // even a need for this file to be architecture specific?
+ uintptr_t fault_memory_ptr = (uintptr_t)ctx->arm_r10;
- if (!JitInterface::IsInCodeSpace(fault_instruction_ptr))
+ if (!JitInterface::HandleFault(fault_memory_ptr, ctx))
{
- // Let's not prevent debugging.
- return;
- }
-
- u64 bad_address = (u64)fault_memory_ptr;
- u64 memspace_bottom = (u64)Memory::base;
- if (bad_address < memspace_bottom)
- {
- PanicAlertT("Exception handler - access below memory space. %08llx%08llx",
- bad_address >> 32, bad_address);
- }
-
- u32 em_address = (u32)(bad_address - memspace_bottom);
-
- const u8 *new_rip = jit->BackPatch(fault_instruction_ptr, em_address, ctx);
- if (new_rip)
- {
- ctx->arm_pc = (u32) new_rip;
+ // retry and crash
+ signal(SIGSEGV, SIG_DFL);
}
}
diff --git a/Source/Core/Core/CMakeLists.txt b/Source/Core/Core/CMakeLists.txt
index 78172d5cb0..39800a448e 100644
--- a/Source/Core/Core/CMakeLists.txt
+++ b/Source/Core/Core/CMakeLists.txt
@@ -195,9 +195,10 @@ if(_M_X86)
PowerPC/Jit64/Jit_Paired.cpp
PowerPC/Jit64/JitRegCache.cpp
PowerPC/Jit64/Jit_SystemRegisters.cpp
- PowerPC/JitCommon/JitBackpatch.cpp
PowerPC/JitCommon/JitAsmCommon.cpp
- PowerPC/JitCommon/Jit_Util.cpp)
+ PowerPC/JitCommon/JitBackpatch.cpp
+ PowerPC/JitCommon/Jit_Util.cpp
+ PowerPC/JitCommon/TrampolineCache.cpp)
elseif(_M_ARM_32)
set(SRCS ${SRCS}
ArmMemTools.cpp
diff --git a/Source/Core/Core/Core.vcxproj b/Source/Core/Core/Core.vcxproj
index 7ead1e4172..b46357fd3d 100644
--- a/Source/Core/Core/Core.vcxproj
+++ b/Source/Core/Core/Core.vcxproj
@@ -229,6 +229,7 @@
+    <ClCompile Include="PowerPC\JitCommon\TrampolineCache.cpp" />
@@ -406,6 +407,7 @@
+    <ClInclude Include="PowerPC\JitCommon\TrampolineCache.h" />
@@ -464,4 +466,4 @@
-</Project>
\ No newline at end of file
+</Project>
diff --git a/Source/Core/Core/Core.vcxproj.filters b/Source/Core/Core/Core.vcxproj.filters
index 39e6aec8f4..faeb9bcd24 100644
--- a/Source/Core/Core/Core.vcxproj.filters
+++ b/Source/Core/Core/Core.vcxproj.filters
@@ -640,6 +640,9 @@
      <Filter>PowerPC\JitCommon</Filter>
+    <ClCompile Include="PowerPC\JitCommon\TrampolineCache.cpp">
+      <Filter>PowerPC\JitCommon</Filter>
+    </ClCompile>
      <Filter>PowerPC\JitIL</Filter>
@@ -1182,6 +1185,9 @@
      <Filter>PowerPC\JitCommon</Filter>
+    <ClInclude Include="PowerPC\JitCommon\TrampolineCache.h">
+      <Filter>PowerPC\JitCommon</Filter>
+    </ClInclude>
      <Filter>PowerPC\JitIL</Filter>
@@ -1204,4 +1210,4 @@
-</Project>
\ No newline at end of file
+</Project>
diff --git a/Source/Core/Core/PowerPC/Jit64IL/JitIL.h b/Source/Core/Core/PowerPC/Jit64IL/JitIL.h
index d0185719f3..5592500c2a 100644
--- a/Source/Core/Core/PowerPC/Jit64IL/JitIL.h
+++ b/Source/Core/Core/PowerPC/Jit64IL/JitIL.h
@@ -56,6 +56,10 @@ public:
void Trace();
+ JitBlockCache *GetBlockCache() override { return &blocks; }
+
+ bool HandleFault(uintptr_t access_address, SContext* ctx) override { return false; }
+
void ClearCache() override;
const u8 *GetDispatcher()
{
@@ -105,4 +109,5 @@ public:
void DynaRunTable31(UGeckoInstruction _inst) override;
void DynaRunTable59(UGeckoInstruction _inst) override;
void DynaRunTable63(UGeckoInstruction _inst) override;
+
};
diff --git a/Source/Core/Core/PowerPC/JitArm32/Jit.h b/Source/Core/Core/PowerPC/JitArm32/Jit.h
index 3cd4cf4478..3fa62d80ab 100644
--- a/Source/Core/Core/PowerPC/JitArm32/Jit.h
+++ b/Source/Core/Core/PowerPC/JitArm32/Jit.h
@@ -58,6 +58,8 @@ private:
void SetFPException(ArmGen::ARMReg Reg, u32 Exception);
ArmGen::FixupBranch JumpIfCRFieldBit(int field, int bit, bool jump_if_set);
+
+ bool BackPatch(SContext* ctx);
public:
JitArm() : code_buffer(32000) {}
~JitArm() {}
@@ -72,9 +74,7 @@ public:
JitBaseBlockCache *GetBlockCache() { return &blocks; }
- const u8 *BackPatch(u8 *codePtr, u32 em_address, void *ctx);
-
- bool IsInCodeSpace(u8 *ptr) { return IsInSpace(ptr); }
+ bool HandleFault(uintptr_t access_address, SContext* ctx) override;
void Trace();
diff --git a/Source/Core/Core/PowerPC/JitArm32/JitArm_BackPatch.cpp b/Source/Core/Core/PowerPC/JitArm32/JitArm_BackPatch.cpp
index 6ba24195f5..ee0cf1ee76 100644
--- a/Source/Core/Core/PowerPC/JitArm32/JitArm_BackPatch.cpp
+++ b/Source/Core/Core/PowerPC/JitArm32/JitArm_BackPatch.cpp
@@ -66,12 +66,23 @@ bool DisamLoadStore(const u32 inst, ARMReg &rD, u8 &accessSize, bool &Store)
}
return true;
}
-const u8 *JitArm::BackPatch(u8 *codePtr, u32, void *ctx_void)
+
+bool JitArm::HandleFault(uintptr_t access_address, SContext* ctx)
+{
+ if (access_address < (uintptr_t)Memory::base)
+ {
+ PanicAlertT("Exception handler - access below memory space. %08llx%08llx",
+ access_address >> 32, access_address);
+ }
+ return BackPatch(ctx);
+}
+
+bool JitArm::BackPatch(SContext* ctx)
{
// TODO: This ctx needs to be filled with our information
- SContext *ctx = (SContext *)ctx_void;
// We need to get the destination register before we start
+ u8* codePtr = (u8*)ctx->CTX_PC;
u32 Value = *(u32*)codePtr;
ARMReg rD;
u8 accessSize;
@@ -109,7 +120,7 @@ const u8 *JitArm::BackPatch(u8 *codePtr, u32, void *ctx_void)
u32 newPC = ctx->CTX_PC - (ARMREGOFFSET + 4 * 4);
ctx->CTX_PC = newPC;
emitter.FlushIcache();
- return (u8*)ctx->CTX_PC;
+ return true;
}
else
{
@@ -135,7 +146,7 @@ const u8 *JitArm::BackPatch(u8 *codePtr, u32, void *ctx_void)
emitter.MOV(rD, R14); // 8
ctx->CTX_PC -= ARMREGOFFSET + (4 * 4);
emitter.FlushIcache();
- return (u8*)ctx->CTX_PC;
+ return true;
}
return 0;
}
diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp
index 6e89ae32eb..ea921817b8 100644
--- a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp
+++ b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp
@@ -3,24 +3,14 @@
// Refer to the license.txt file included.
#include <string>
-#include
#include "disasm.h"
-#include "Common/CommonTypes.h"
-#include "Common/StringUtil.h"
#include "Core/PowerPC/JitCommon/JitBackpatch.h"
#include "Core/PowerPC/JitCommon/JitBase.h"
-#ifdef _WIN32
- #include <windows.h>
-#endif
-
-
using namespace Gen;
-extern u8 *trampolineCodePtr;
-
static void BackPatchError(const std::string &text, u8 *codePtr, u32 emAddress)
{
u64 code_addr = (u64)codePtr;
@@ -35,176 +25,51 @@ static void BackPatchError(const std::string &text, u8 *codePtr, u32 emAddress)
return;
}
-void TrampolineCache::Init()
+// This generates some fairly heavy trampolines, but it doesn't really hurt.
+// Only instructions that access I/O will get these, and there won't be that
+// many of them in a typical program/game.
+bool Jitx86Base::HandleFault(uintptr_t access_address, SContext* ctx)
{
- AllocCodeSpace(4 * 1024 * 1024);
+ // TODO: do we properly handle off-the-end?
+ if (access_address >= (uintptr_t)Memory::base && access_address < (uintptr_t)Memory::base + 0x100010000)
+ return BackPatch((u32)(access_address - (uintptr_t)Memory::base), ctx);
+
+ return false;
}
-void TrampolineCache::Shutdown()
+bool Jitx86Base::BackPatch(u32 emAddress, SContext* ctx)
{
- FreeCodeSpace();
-}
+ u8* codePtr = (u8*) ctx->CTX_PC;
-// Extremely simplistic - just generate the requested trampoline. May reuse them in the future.
-const u8 *TrampolineCache::GetReadTrampoline(const InstructionInfo &info, u32 registersInUse)
-{
- if (GetSpaceLeft() < 1024)
- PanicAlert("Trampoline cache full");
-
- const u8 *trampoline = GetCodePtr();
- X64Reg addrReg = (X64Reg)info.scaledReg;
- X64Reg dataReg = (X64Reg)info.regOperandReg;
-
- // It's a read. Easy.
- // RSP alignment here is 8 due to the call.
- ABI_PushRegistersAndAdjustStack(registersInUse, 8);
-
- if (addrReg != ABI_PARAM1)
- MOV(32, R(ABI_PARAM1), R((X64Reg)addrReg));
-
- if (info.displacement)
- ADD(32, R(ABI_PARAM1), Imm32(info.displacement));
-
- switch (info.operandSize)
- {
- case 4:
- CALL((void *)&Memory::Read_U32);
- break;
- case 2:
- CALL((void *)&Memory::Read_U16);
- SHL(32, R(ABI_RETURN), Imm8(16));
- break;
- case 1:
- CALL((void *)&Memory::Read_U8);
- break;
- }
-
- if (info.signExtend && info.operandSize == 1)
- {
- // Need to sign extend value from Read_U8.
- MOVSX(32, 8, dataReg, R(ABI_RETURN));
- }
- else if (dataReg != EAX)
- {
- MOV(32, R(dataReg), R(ABI_RETURN));
- }
-
- ABI_PopRegistersAndAdjustStack(registersInUse, 8);
- RET();
- return trampoline;
-}
-
-// Extremely simplistic - just generate the requested trampoline. May reuse them in the future.
-const u8 *TrampolineCache::GetWriteTrampoline(const InstructionInfo &info, u32 registersInUse, u32 pc)
-{
- if (GetSpaceLeft() < 1024)
- PanicAlert("Trampoline cache full");
-
- const u8 *trampoline = GetCodePtr();
-
- X64Reg dataReg = (X64Reg)info.regOperandReg;
- X64Reg addrReg = (X64Reg)info.scaledReg;
-
- // It's a write. Yay. Remember that we don't have to be super efficient since it's "just" a
- // hardware access - we can take shortcuts.
- // Don't treat FIFO writes specially for now because they require a burst
- // check anyway.
-
- // PC is used by memory watchpoints (if enabled) or to print accurate PC locations in debug logs
- MOV(32, PPCSTATE(pc), Imm32(pc));
-
- ABI_PushRegistersAndAdjustStack(registersInUse, 8);
-
- if (info.hasImmediate)
- {
- if (addrReg != ABI_PARAM2)
- MOV(64, R(ABI_PARAM2), R(addrReg));
- // we have to swap back the immediate to pass it to the write functions
- switch (info.operandSize)
- {
- case 8:
- PanicAlert("Invalid 64-bit immediate!");
- break;
- case 4:
- MOV(32, R(ABI_PARAM1), Imm32(Common::swap32((u32)info.immediate)));
- break;
- case 2:
- MOV(16, R(ABI_PARAM1), Imm16(Common::swap16((u16)info.immediate)));
- break;
- case 1:
- MOV(8, R(ABI_PARAM1), Imm8((u8)info.immediate));
- break;
- }
- }
- else
- {
- MOVTwo(64, ABI_PARAM1, dataReg, ABI_PARAM2, addrReg);
- }
- if (info.displacement)
- {
- ADD(32, R(ABI_PARAM2), Imm32(info.displacement));
- }
-
- switch (info.operandSize)
- {
- case 8:
- CALL((void *)&Memory::Write_U64);
- break;
- case 4:
- CALL((void *)&Memory::Write_U32);
- break;
- case 2:
- CALL((void *)&Memory::Write_U16);
- break;
- case 1:
- CALL((void *)&Memory::Write_U8);
- break;
- }
-
- ABI_PopRegistersAndAdjustStack(registersInUse, 8);
- RET();
-
- return trampoline;
-}
-
-
-// This generates some fairly heavy trampolines, but:
-// 1) It's really necessary. We don't know anything about the context.
-// 2) It doesn't really hurt. Only instructions that access I/O will get these, and there won't be
-// that many of them in a typical program/game.
-const u8 *Jitx86Base::BackPatch(u8 *codePtr, u32 emAddress, void *ctx_void)
-{
- SContext *ctx = (SContext *)ctx_void;
-
- if (!jit->IsInCodeSpace(codePtr))
- return nullptr; // this will become a regular crash real soon after this
+ if (!IsInSpace(codePtr))
+ return false; // this will become a regular crash real soon after this
InstructionInfo info = {};
if (!DisassembleMov(codePtr, &info))
{
BackPatchError("BackPatch - failed to disassemble MOV instruction", codePtr, emAddress);
- return nullptr;
+ return false;
}
if (info.otherReg != RMEM)
{
PanicAlert("BackPatch : Base reg not RMEM."
"\n\nAttempted to access %08x.", emAddress);
- return nullptr;
+ return false;
}
if (info.byteSwap && info.instructionSize < BACKPATCH_SIZE)
{
PanicAlert("BackPatch: MOVBE is too small");
- return nullptr;
+ return false;
}
auto it = registersInUseAtLoc.find(codePtr);
if (it == registersInUseAtLoc.end())
{
PanicAlert("BackPatch: no register use entry for address %p", codePtr);
- return nullptr;
+ return false;
}
u32 registersInUse = it->second;
@@ -228,7 +93,7 @@ const u8 *Jitx86Base::BackPatch(u8 *codePtr, u32 emAddress, void *ctx_void)
{
emitter.NOP(padding);
}
- return codePtr;
+ ctx->CTX_PC = (u64)codePtr;
}
else
{
@@ -281,6 +146,8 @@ const u8 *Jitx86Base::BackPatch(u8 *codePtr, u32 emAddress, void *ctx_void)
{
emitter.NOP(padding);
}
- return start;
+ ctx->CTX_PC = (u64)start;
}
+
+ return true;
}
diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h
index 3ca7656b21..39e3389501 100644
--- a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h
+++ b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.h
@@ -5,11 +5,6 @@
#pragma once
#include "Common/CommonTypes.h"
-#include "Common/x64Analyzer.h"
-#include "Common/x64Emitter.h"
-
-// We need at least this many bytes for backpatching.
-const int BACKPATCH_SIZE = 5;
// meh.
#if defined(_WIN32)
@@ -147,8 +142,8 @@ const int BACKPATCH_SIZE = 5;
#endif
#if _M_X86_64
-#define CTX_PC CTX_RIP
#include
+#define CTX_PC CTX_RIP
static inline u64 *ContextRN(SContext* ctx, int n)
{
static const u8 offsets[] =
@@ -173,13 +168,3 @@ static inline u64 *ContextRN(SContext* ctx, int n)
return (u64 *) ((char *) ctx + offsets[n]);
}
#endif
-
-class TrampolineCache : public Gen::X64CodeBlock
-{
-public:
- void Init();
- void Shutdown();
-
- const u8 *GetReadTrampoline(const InstructionInfo &info, u32 registersInUse);
- const u8 *GetWriteTrampoline(const InstructionInfo &info, u32 registersInUse, u32 pc);
-};
diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBase.h b/Source/Core/Core/PowerPC/JitCommon/JitBase.h
index c6ff6e4967..52463ec619 100644
--- a/Source/Core/Core/PowerPC/JitCommon/JitBase.h
+++ b/Source/Core/Core/PowerPC/JitCommon/JitBase.h
@@ -26,6 +26,7 @@
#include "Core/PowerPC/JitCommon/JitAsmCommon.h"
#include "Core/PowerPC/JitCommon/JitBackpatch.h"
#include "Core/PowerPC/JitCommon/JitCache.h"
+#include "Core/PowerPC/JitCommon/TrampolineCache.h"
// TODO: find a better place for x86-specific stuff
// The following register assignments are common to Jit64 and Jit64IL:
@@ -110,24 +111,20 @@ public:
virtual void Jit(u32 em_address) = 0;
- virtual const u8 *BackPatch(u8 *codePtr, u32 em_address, void *ctx) = 0;
-
virtual const CommonAsmRoutinesBase *GetAsmRoutines() = 0;
- virtual bool IsInCodeSpace(u8 *ptr) = 0;
+ virtual bool HandleFault(uintptr_t access_address, SContext* ctx) = 0;
};
class Jitx86Base : public JitBase, public EmuCodeBlock
{
protected:
+ bool BackPatch(u32 emAddress, SContext* ctx);
JitBlockCache blocks;
TrampolineCache trampolines;
public:
JitBlockCache *GetBlockCache() override { return &blocks; }
-
- const u8 *BackPatch(u8 *codePtr, u32 em_address, void *ctx) override;
-
- bool IsInCodeSpace(u8 *ptr) override { return IsInSpace(ptr); }
+ bool HandleFault(uintptr_t access_address, SContext* ctx) override;
};
extern JitBase *jit;
diff --git a/Source/Core/Core/PowerPC/JitCommon/TrampolineCache.cpp b/Source/Core/Core/PowerPC/JitCommon/TrampolineCache.cpp
new file mode 100644
index 0000000000..5e961bc6e5
--- /dev/null
+++ b/Source/Core/Core/PowerPC/JitCommon/TrampolineCache.cpp
@@ -0,0 +1,156 @@
+// Copyright 2013 Dolphin Emulator Project
+// Licensed under GPLv2
+// Refer to the license.txt file included.
+
+#include
+#include
+
+#include "Common/CommonTypes.h"
+#include "Common/StringUtil.h"
+#include "Common/x64ABI.h"
+#include "Core/HW/Memmap.h"
+#include "Core/PowerPC/JitCommon/JitBase.h"
+#include "Core/PowerPC/JitCommon/TrampolineCache.h"
+
+#ifdef _WIN32
+ #include <windows.h>
+#endif
+
+
+using namespace Gen;
+
+extern u8 *trampolineCodePtr;
+
+void TrampolineCache::Init()
+{
+ AllocCodeSpace(4 * 1024 * 1024);
+}
+
+void TrampolineCache::Shutdown()
+{
+ FreeCodeSpace();
+}
+
+// Extremely simplistic - just generate the requested trampoline. May reuse them in the future.
+const u8 *TrampolineCache::GetReadTrampoline(const InstructionInfo &info, u32 registersInUse)
+{
+ if (GetSpaceLeft() < 1024)
+ PanicAlert("Trampoline cache full");
+
+ const u8 *trampoline = GetCodePtr();
+ X64Reg addrReg = (X64Reg)info.scaledReg;
+ X64Reg dataReg = (X64Reg)info.regOperandReg;
+
+ // It's a read. Easy.
+ // RSP alignment here is 8 due to the call.
+ ABI_PushRegistersAndAdjustStack(registersInUse, 8);
+
+ if (addrReg != ABI_PARAM1)
+ MOV(32, R(ABI_PARAM1), R((X64Reg)addrReg));
+
+ if (info.displacement)
+ ADD(32, R(ABI_PARAM1), Imm32(info.displacement));
+
+ switch (info.operandSize)
+ {
+ case 4:
+ CALL((void *)&Memory::Read_U32);
+ break;
+ case 2:
+ CALL((void *)&Memory::Read_U16);
+ SHL(32, R(ABI_RETURN), Imm8(16));
+ break;
+ case 1:
+ CALL((void *)&Memory::Read_U8);
+ break;
+ }
+
+ if (info.signExtend && info.operandSize == 1)
+ {
+ // Need to sign extend value from Read_U8.
+ MOVSX(32, 8, dataReg, R(ABI_RETURN));
+ }
+ else if (dataReg != EAX)
+ {
+ MOV(32, R(dataReg), R(ABI_RETURN));
+ }
+
+ ABI_PopRegistersAndAdjustStack(registersInUse, 8);
+ RET();
+ return trampoline;
+}
+
+// Extremely simplistic - just generate the requested trampoline. May reuse them in the future.
+const u8 *TrampolineCache::GetWriteTrampoline(const InstructionInfo &info, u32 registersInUse, u32 pc)
+{
+ if (GetSpaceLeft() < 1024)
+ PanicAlert("Trampoline cache full");
+
+ const u8 *trampoline = GetCodePtr();
+
+ X64Reg dataReg = (X64Reg)info.regOperandReg;
+ X64Reg addrReg = (X64Reg)info.scaledReg;
+
+ // It's a write. Yay. Remember that we don't have to be super efficient since it's "just" a
+ // hardware access - we can take shortcuts.
+ // Don't treat FIFO writes specially for now because they require a burst
+ // check anyway.
+
+ // PC is used by memory watchpoints (if enabled) or to print accurate PC locations in debug logs
+ MOV(32, PPCSTATE(pc), Imm32(pc));
+
+ ABI_PushRegistersAndAdjustStack(registersInUse, 8);
+
+ if (info.hasImmediate)
+ {
+ if (addrReg != ABI_PARAM2)
+ MOV(64, R(ABI_PARAM2), R(addrReg));
+ // we have to swap back the immediate to pass it to the write functions
+ switch (info.operandSize)
+ {
+ case 8:
+ PanicAlert("Invalid 64-bit immediate!");
+ break;
+ case 4:
+ MOV(32, R(ABI_PARAM1), Imm32(Common::swap32((u32)info.immediate)));
+ break;
+ case 2:
+ MOV(16, R(ABI_PARAM1), Imm16(Common::swap16((u16)info.immediate)));
+ break;
+ case 1:
+ MOV(8, R(ABI_PARAM1), Imm8((u8)info.immediate));
+ break;
+ }
+ }
+ else
+ {
+ MOVTwo(64, ABI_PARAM1, dataReg, ABI_PARAM2, addrReg);
+ }
+ if (info.displacement)
+ {
+ ADD(32, R(ABI_PARAM2), Imm32(info.displacement));
+ }
+
+ switch (info.operandSize)
+ {
+ case 8:
+ CALL((void *)&Memory::Write_U64);
+ break;
+ case 4:
+ CALL((void *)&Memory::Write_U32);
+ break;
+ case 2:
+ CALL((void *)&Memory::Write_U16);
+ break;
+ case 1:
+ CALL((void *)&Memory::Write_U8);
+ break;
+ }
+
+ ABI_PopRegistersAndAdjustStack(registersInUse, 8);
+ RET();
+
+ return trampoline;
+}
+
+
diff --git a/Source/Core/Core/PowerPC/JitCommon/TrampolineCache.h b/Source/Core/Core/PowerPC/JitCommon/TrampolineCache.h
new file mode 100644
index 0000000000..516a071ac2
--- /dev/null
+++ b/Source/Core/Core/PowerPC/JitCommon/TrampolineCache.h
@@ -0,0 +1,22 @@
+// Copyright 2013 Dolphin Emulator Project
+// Licensed under GPLv2
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "Common/CommonTypes.h"
+#include "Common/x64Analyzer.h"
+#include "Common/x64Emitter.h"
+
+// We need at least this many bytes for backpatching.
+const int BACKPATCH_SIZE = 5;
+
+class TrampolineCache : public Gen::X64CodeBlock
+{
+public:
+ void Init();
+ void Shutdown();
+
+ const u8 *GetReadTrampoline(const InstructionInfo &info, u32 registersInUse);
+ const u8 *GetWriteTrampoline(const InstructionInfo &info, u32 registersInUse, u32 pc);
+};
diff --git a/Source/Core/Core/PowerPC/JitInterface.cpp b/Source/Core/Core/PowerPC/JitInterface.cpp
index ea9b12be70..6bfd1c4009 100644
--- a/Source/Core/Core/PowerPC/JitInterface.cpp
+++ b/Source/Core/Core/PowerPC/JitInterface.cpp
@@ -190,13 +190,9 @@ namespace JitInterface
}
#endif
}
- bool IsInCodeSpace(u8 *ptr)
+ bool HandleFault(uintptr_t access_address, SContext* ctx)
{
- return jit->IsInCodeSpace(ptr);
- }
- const u8 *BackPatch(u8 *codePtr, u32 em_address, void *ctx)
- {
- return jit->BackPatch(codePtr, em_address, ctx);
+ return jit->HandleFault(access_address, ctx);
}
void ClearCache()
diff --git a/Source/Core/Core/PowerPC/JitInterface.h b/Source/Core/Core/PowerPC/JitInterface.h
index 3cb57422bb..a8ed783726 100644
--- a/Source/Core/Core/PowerPC/JitInterface.h
+++ b/Source/Core/Core/PowerPC/JitInterface.h
@@ -7,6 +7,7 @@
#include <string>
#include "Common/ChunkFile.h"
#include "Core/PowerPC/CPUCoreBase.h"
+#include "Core/PowerPC/JitCommon/JitBackpatch.h"
namespace JitInterface
{
@@ -20,8 +21,7 @@ namespace JitInterface
void WriteProfileResults(const std::string& filename);
// Memory Utilities
- bool IsInCodeSpace(u8 *ptr);
- const u8 *BackPatch(u8 *codePtr, u32 em_address, void *ctx);
+ bool HandleFault(uintptr_t access_address, SContext* ctx);
// used by JIT to read instructions
u32 Read_Opcode_JIT(const u32 _Address);
diff --git a/Source/Core/Core/x64MemTools.cpp b/Source/Core/Core/x64MemTools.cpp
index 057d3a83f0..518c3bb160 100644
--- a/Source/Core/Core/x64MemTools.cpp
+++ b/Source/Core/Core/x64MemTools.cpp
@@ -23,42 +23,6 @@
namespace EMM
{
-static bool DoFault(u64 bad_address, SContext *ctx)
-{
- if (!JitInterface::IsInCodeSpace((u8*) ctx->CTX_PC))
- {
- // Let's not prevent debugging.
- return false;
- }
-
- u64 memspace_bottom = (u64)Memory::base;
- u64 memspace_top = memspace_bottom +
-#if _ARCH_64
- 0x100000000ULL;
-#else
- 0x40000000;
-#endif
-
- if (bad_address < memspace_bottom || bad_address >= memspace_top)
- {
- return false;
- }
-
- u32 em_address = (u32)(bad_address - memspace_bottom);
- const u8 *new_pc = jit->BackPatch((u8*) ctx->CTX_PC, em_address, ctx);
- if (new_pc)
- {
- ctx->CTX_PC = (u64) new_pc;
- }
- else
- {
- // there was an error, give the debugger a chance
- return false;
- }
-
- return true;
-}
-
#ifdef _WIN32
LONG NTAPI Handler(PEXCEPTION_POINTERS pPtrs)
@@ -74,10 +38,10 @@ LONG NTAPI Handler(PEXCEPTION_POINTERS pPtrs)
}
// virtual address of the inaccessible data
- u64 badAddress = (u64)pPtrs->ExceptionRecord->ExceptionInformation[1];
+ uintptr_t badAddress = (uintptr_t)pPtrs->ExceptionRecord->ExceptionInformation[1];
CONTEXT *ctx = pPtrs->ContextRecord;
- if (DoFault(badAddress, ctx))
+ if (JitInterface::HandleFault(badAddress, ctx))
{
return (DWORD)EXCEPTION_CONTINUE_EXECUTION;
}
@@ -198,7 +162,7 @@ void ExceptionThread(mach_port_t port)
x86_thread_state64_t *state = (x86_thread_state64_t *) msg_in.old_state;
- bool ok = DoFault(msg_in.code[1], state);
+ bool ok = JitInterface::HandleFault((uintptr_t) msg_in.code[1], state);
// Set up the reply.
msg_out.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(msg_in.Head.msgh_bits), 0);
@@ -263,12 +227,12 @@ static void sigsegv_handler(int sig, siginfo_t *info, void *raw_context)
// Huh? Return.
return;
}
- u64 bad_address = (u64)info->si_addr;
+ uintptr_t bad_address = (uintptr_t)info->si_addr;
// Get all the information we can out of the context.
mcontext_t *ctx = &context->uc_mcontext;
// assume it's not a write
- if (!DoFault(bad_address, ctx))
+ if (!JitInterface::HandleFault(bad_address, ctx))
{
// retry and crash
signal(SIGSEGV, SIG_DFL);