From 37d00cceda58cda08f6206bff7309aaa7ba45e6e Mon Sep 17 00:00:00 2001 From: comex Date: Wed, 22 Apr 2015 23:17:09 -0400 Subject: [PATCH 1/7] Fix wx bullshit which made the watch view assert on my machine. 1) Apparently wxString::Format is type safe, and passing a u32 to it with the format "%lu" crashes with a meaningless assertion failure. Sure, it's the wrong type, but the error sure doesn't help... 2) "A MenuItem ID of Zero does not work under Mac". Thanks for the helpful assert message, no thanks for making your construct have random platform-specific differences for no reason (it's not like menu item IDs directly correspond to a part of Cocoa's menu API like they do on Win32). --- Source/Core/DolphinWX/Debugger/WatchView.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/Core/DolphinWX/Debugger/WatchView.cpp b/Source/Core/DolphinWX/Debugger/WatchView.cpp index a63676426a..293744d457 100644 --- a/Source/Core/DolphinWX/Debugger/WatchView.cpp +++ b/Source/Core/DolphinWX/Debugger/WatchView.cpp @@ -25,7 +25,7 @@ enum { - IDM_DELETEWATCH, + IDM_DELETEWATCH = 1, IDM_ADDMEMCHECK, IDM_VIEWMEMORY, }; @@ -98,7 +98,7 @@ static wxString GetValueByRowCol(int row, int col) case 0: return wxString::Format("%s", GetWatchName(row)); case 1: return wxString::Format("%08x", GetWatchAddr(row)); case 2: return wxString::Format("%08x", GetWatchValue(row)); - case 3: return wxString::Format("%lu", GetWatchValue(row)); + case 3: return wxString::Format("%u", GetWatchValue(row)); case 4: { u32 addr = GetWatchAddr(row); From 0c18e5886de0c04cdba38b7304b622943194c06a Mon Sep 17 00:00:00 2001 From: comex Date: Wed, 22 Apr 2015 23:40:00 -0400 Subject: [PATCH 2/7] Remove mnemonics in MemoryWindow. On OS X, this broke Cmd-V to paste in the text boxes. Apparently wx thinks having mnemonics (which are Alt-* on Windows) be Cmd-* on OS X, even if this disables standard shortcuts, is a good idea. Lioncash suggested just getting rid of the accelerators on non-menu controls, so I'm doing that rather than disabling them only on OS X. 
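The change itself is mechanical: wx treats '&' in a control label as a mnemonic, and per the observation above that mnemonic becomes a Cmd-* accelerator on OS X. Isolating one button from the diff below:

    // Before: the '&' makes V a mnemonic -- Alt+V on Windows, but Cmd+V on OS X,
    // where it can shadow the standard paste shortcut in the window's text boxes.
    sizerRight->Add(new wxButton(this, IDM_SETVALBUTTON, _("Set &Value")));

    // After: plain label, no mnemonic, no stolen shortcut.
    sizerRight->Add(new wxButton(this, IDM_SETVALBUTTON, _("Set Value")));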
--- .../Core/DolphinWX/Debugger/MemoryWindow.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp b/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp index ec6ff6c295..7c122b0bdc 100644 --- a/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp +++ b/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp @@ -94,27 +94,27 @@ CMemoryWindow::CMemoryWindow(wxWindow* parent, wxWindowID id, sizerBig->Add(sizerRight, 0, wxEXPAND | wxALL, 3); sizerRight->Add(addrbox = new wxTextCtrl(this, IDM_MEM_ADDRBOX, "")); sizerRight->Add(valbox = new wxTextCtrl(this, IDM_VALBOX, "")); - sizerRight->Add(new wxButton(this, IDM_SETVALBUTTON, _("Set &Value"))); + sizerRight->Add(new wxButton(this, IDM_SETVALBUTTON, _("Set Value"))); sizerRight->AddSpacer(5); - sizerRight->Add(new wxButton(this, IDM_DUMP_MEMORY, _("&Dump MRAM"))); - sizerRight->Add(new wxButton(this, IDM_DUMP_MEM2, _("&Dump EXRAM"))); + sizerRight->Add(new wxButton(this, IDM_DUMP_MEMORY, _("Dump MRAM"))); + sizerRight->Add(new wxButton(this, IDM_DUMP_MEM2, _("Dump EXRAM"))); if (!SConfig::GetInstance().m_LocalCoreStartupParameter.bMMU) - sizerRight->Add(new wxButton(this, IDM_DUMP_FAKEVMEM, _("&Dump FakeVMEM"))); + sizerRight->Add(new wxButton(this, IDM_DUMP_FAKEVMEM, _("Dump FakeVMEM"))); wxStaticBoxSizer* sizerSearchType = new wxStaticBoxSizer(wxVERTICAL, this, _("Search")); sizerSearchType->Add(btnSearch = new wxButton(this, IDM_SEARCH, _("Search"))); - sizerSearchType->Add(chkAscii = new wxCheckBox(this, IDM_ASCII, "&Ascii ")); - sizerSearchType->Add(chkHex = new wxCheckBox(this, IDM_HEX, _("&Hex"))); + sizerSearchType->Add(chkAscii = new wxCheckBox(this, IDM_ASCII, "Ascii ")); + sizerSearchType->Add(chkHex = new wxCheckBox(this, IDM_HEX, _("Hex"))); sizerRight->Add(sizerSearchType); wxStaticBoxSizer* sizerDataTypes = new wxStaticBoxSizer(wxVERTICAL, this, _("Data Type")); sizerDataTypes->SetMinSize(74, 40); - sizerDataTypes->Add(chk8 = new wxCheckBox(this, IDM_U8, "&U8")); - sizerDataTypes->Add(chk16 = new wxCheckBox(this, IDM_U16, "&U16")); - sizerDataTypes->Add(chk32 = new wxCheckBox(this, IDM_U32, "&U32")); + sizerDataTypes->Add(chk8 = new wxCheckBox(this, IDM_U8, "U8")); + sizerDataTypes->Add(chk16 = new wxCheckBox(this, IDM_U16, "U16")); + sizerDataTypes->Add(chk32 = new wxCheckBox(this, IDM_U32, "U32")); sizerRight->Add(sizerDataTypes); SetSizer(sizerBig); chkHex->SetValue(1); //Set defaults From 3499f2c2d0a447f0d078f1e1293bdf6ee12fe98b Mon Sep 17 00:00:00 2001 From: comex Date: Fri, 24 Apr 2015 22:19:21 -0400 Subject: [PATCH 3/7] To make up for it, make pressing enter on the value textbox set the value. 
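For Enter to reach a handler in wx, two pieces are needed, and both appear in the diff below: the text control must be created with wxTE_PROCESS_ENTER, and the event table needs an EVT_TEXT_ENTER entry. A self-contained sketch of the same pattern, with illustrative (non-Dolphin) names:

    #include <wx/wx.h>

    // Pressing Enter in the value box triggers the same handler as the button,
    // then hands focus back to the box so repeated edits stay quick.
    class ValuePanel : public wxPanel
    {
    public:
        enum { ID_VALBOX = wxID_HIGHEST + 1, ID_SETVAL };

        explicit ValuePanel(wxWindow* parent) : wxPanel(parent)
        {
            wxBoxSizer* sizer = new wxBoxSizer(wxVERTICAL);
            // wxTE_PROCESS_ENTER is required; without it EVT_TEXT_ENTER never fires.
            m_valbox = new wxTextCtrl(this, ID_VALBOX, "", wxDefaultPosition,
                                      wxDefaultSize, wxTE_PROCESS_ENTER);
            sizer->Add(m_valbox);
            sizer->Add(new wxButton(this, ID_SETVAL, "Set Value"));
            SetSizer(sizer);
        }

    private:
        void OnSetValue(wxCommandEvent&) { /* parse m_valbox and apply the value */ }
        void OnValBoxEnter(wxCommandEvent& event)
        {
            OnSetValue(event);
            m_valbox->SetFocus();
        }

        wxTextCtrl* m_valbox;
        wxDECLARE_EVENT_TABLE();
    };

    wxBEGIN_EVENT_TABLE(ValuePanel, wxPanel)
        EVT_TEXT_ENTER(ValuePanel::ID_VALBOX, ValuePanel::OnValBoxEnter)
        EVT_BUTTON(ValuePanel::ID_SETVAL, ValuePanel::OnSetValue)
    wxEND_EVENT_TABLE()

The patch does the same to CMemoryWindow: SetMemoryValueFromValBox forwards to SetMemoryValue and then refocuses the box.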
--- Source/Core/DolphinWX/Debugger/MemoryWindow.cpp | 10 +++++++++- Source/Core/DolphinWX/Debugger/MemoryWindow.h | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp b/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp index 7c122b0bdc..8667751109 100644 --- a/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp +++ b/Source/Core/DolphinWX/Debugger/MemoryWindow.cpp @@ -59,6 +59,7 @@ enum BEGIN_EVENT_TABLE(CMemoryWindow, wxPanel) EVT_TEXT(IDM_MEM_ADDRBOX, CMemoryWindow::OnAddrBoxChange) + EVT_TEXT_ENTER(IDM_VALBOX, CMemoryWindow::SetMemoryValueFromValBox) EVT_LISTBOX(IDM_SYMBOLLIST, CMemoryWindow::OnSymbolListChange) EVT_HOST_COMMAND(wxID_ANY, CMemoryWindow::OnHostMessage) EVT_BUTTON(IDM_SETVALBUTTON, CMemoryWindow::SetMemoryValue) @@ -93,7 +94,7 @@ CMemoryWindow::CMemoryWindow(wxWindow* parent, wxWindowID id, sizerBig->Add(memview, 20, wxEXPAND); sizerBig->Add(sizerRight, 0, wxEXPAND | wxALL, 3); sizerRight->Add(addrbox = new wxTextCtrl(this, IDM_MEM_ADDRBOX, "")); - sizerRight->Add(valbox = new wxTextCtrl(this, IDM_VALBOX, "")); + sizerRight->Add(valbox = new wxTextCtrl(this, IDM_VALBOX, "", wxDefaultPosition, wxDefaultSize, wxTE_PROCESS_ENTER)); sizerRight->Add(new wxButton(this, IDM_SETVALBUTTON, _("Set Value"))); sizerRight->AddSpacer(5); @@ -156,6 +157,13 @@ void CMemoryWindow::JumpToAddress(u32 _Address) memview->Center(_Address); } +void CMemoryWindow::SetMemoryValueFromValBox(wxCommandEvent& event) +{ + SetMemoryValue(event); + valbox->SetFocus(); + +} + void CMemoryWindow::SetMemoryValue(wxCommandEvent& event) { if (!Memory::IsInitialized()) diff --git a/Source/Core/DolphinWX/Debugger/MemoryWindow.h b/Source/Core/DolphinWX/Debugger/MemoryWindow.h index ba23bdf01e..479cdcbd5d 100644 --- a/Source/Core/DolphinWX/Debugger/MemoryWindow.h +++ b/Source/Core/DolphinWX/Debugger/MemoryWindow.h @@ -66,6 +66,7 @@ private: void OnCallstackListChange(wxCommandEvent& event); void OnAddrBoxChange(wxCommandEvent& event); void OnHostMessage(wxCommandEvent& event); + void SetMemoryValueFromValBox(wxCommandEvent& event); void SetMemoryValue(wxCommandEvent& event); void OnDumpMemory(wxCommandEvent& event); void OnDumpMem2(wxCommandEvent& event); From b84f6a55ab848e41f23d9f712a116a276a6d0bda Mon Sep 17 00:00:00 2001 From: comex Date: Thu, 23 Apr 2015 00:05:31 -0400 Subject: [PATCH 4/7] Automatically disable fastmem and enable memcheck when there are any watchpoints. - Move JitState::memcheck to JitOptions because it's an option. - Add JitOptions::fastmem; switch JIT code to checking that rather than bFastmem directly. - Add JitBase::UpdateMemoryOptions(), which sets both two JIT options (replacing the duplicate lines in Jit64 and JitIL that set memcheck from bMMU). - (!) The ARM JITs both had some lines that checked js.memcheck despite it being uninitialized in their cases. I've added UpdateMemoryOptions to both. There is a chance this could make something slower compared to the old behavior if the uninitialized value happened to be nonzero... hdkr should check this. - UpdateMemoryOptions forces jo.fastmem and jo.memcheck off and on, respectively, if there are any watchpoints set. - Also call that function from ClearCache. - Have MemChecks call ClearCache when the {first,last} watchpoint is {added,removed}. Enabling jo.memcheck (bah, confusing names) is currently pointless because hitting a watchpoint does not interrupt the basic block. That will change in the next commit. 
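The decision logic those bullet points describe lives in one new helper; as added in the JitBase.cpp hunk further down, it is simply:

    void JitBase::UpdateMemoryOptions()
    {
        // Any active watchpoint forces fastmem off and memcheck on; otherwise the
        // options follow the user's Fastmem and MMU settings as before.
        bool any_watchpoints = PowerPC::memchecks.HasAny();
        jo.fastmem = SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem &&
                     !any_watchpoints;
        jo.memcheck = SConfig::GetInstance().m_LocalCoreStartupParameter.bMMU ||
                      any_watchpoints;
    }

ClearCache() calls it, and MemChecks::Add/Remove trigger a ClearCache() when the first watchpoint appears or the last one goes away, which is what flips the JIT into (or out of) watchpoint-compatible code.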
--- Source/Core/Common/BreakPoints.cpp | 7 +++++++ Source/Core/Common/BreakPoints.h | 2 ++ Source/Core/Core/HW/Memmap.cpp | 6 +++--- Source/Core/Core/PowerPC/Jit64/Jit.cpp | 9 +++++---- .../Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp | 12 +++++------ .../PowerPC/Jit64/Jit_LoadStoreFloating.cpp | 12 +++++------ .../PowerPC/Jit64/Jit_LoadStorePaired.cpp | 16 +++++++-------- .../PowerPC/Jit64Common/Jit64AsmCommon.cpp | 20 +++++++++---------- Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp | 10 +++++----- Source/Core/Core/PowerPC/JitArm32/Jit.cpp | 6 ++++-- .../PowerPC/JitArm32/JitArm_LoadStore.cpp | 10 +++++----- .../JitArm32/JitArm_LoadStoreFloating.cpp | 6 +++--- .../JitArm32/JitArm_LoadStorePaired.cpp | 8 ++++---- Source/Core/Core/PowerPC/JitArm64/Jit.cpp | 6 ++++-- .../PowerPC/JitArm64/JitArm64_LoadStore.cpp | 8 ++++---- .../JitArm64/JitArm64_LoadStoreFloating.cpp | 8 ++++---- .../JitArm64/JitArm64_LoadStorePaired.cpp | 4 ++-- .../Core/PowerPC/JitCommon/JitBackpatch.cpp | 2 +- .../Core/Core/PowerPC/JitCommon/JitBase.cpp | 9 +++++++++ Source/Core/Core/PowerPC/JitCommon/JitBase.h | 5 ++++- .../Core/Core/PowerPC/JitCommon/Jit_Util.cpp | 8 ++++---- .../JitILCommon/JitILBase_LoadStore.cpp | 20 +++++++++---------- .../JitILBase_LoadStoreFloating.cpp | 16 +++++++-------- .../JitILCommon/JitILBase_LoadStorePaired.cpp | 4 ++-- 24 files changed, 120 insertions(+), 94 deletions(-) diff --git a/Source/Core/Common/BreakPoints.cpp b/Source/Core/Common/BreakPoints.cpp index 74dcb45030..2ee89389d3 100644 --- a/Source/Core/Common/BreakPoints.cpp +++ b/Source/Core/Common/BreakPoints.cpp @@ -165,8 +165,13 @@ void MemChecks::AddFromStrings(const TMemChecksStr& mcstrs) void MemChecks::Add(const TMemCheck& _rMemoryCheck) { + bool had_any = HasAny(); if (GetMemCheck(_rMemoryCheck.StartAddress) == nullptr) m_MemChecks.push_back(_rMemoryCheck); + // If this is the first one, clear the JIT cache so it can switch to + // watchpoint-compatible code. + if (!had_any) + jit->ClearCache(); } void MemChecks::Remove(u32 _Address) @@ -179,6 +184,8 @@ void MemChecks::Remove(u32 _Address) return; } } + if (!HasAny()) + jit->ClearCache(); } TMemCheck *MemChecks::GetMemCheck(u32 address) diff --git a/Source/Core/Common/BreakPoints.h b/Source/Core/Common/BreakPoints.h index 631284bc03..370f4c96e0 100644 --- a/Source/Core/Common/BreakPoints.h +++ b/Source/Core/Common/BreakPoints.h @@ -105,6 +105,8 @@ public: void Remove(u32 _Address); void Clear() { m_MemChecks.clear(); } + + bool HasAny() const { return !m_MemChecks.empty(); } }; class Watches diff --git a/Source/Core/Core/HW/Memmap.cpp b/Source/Core/Core/HW/Memmap.cpp index bb964a344f..239d8603d4 100644 --- a/Source/Core/Core/HW/Memmap.cpp +++ b/Source/Core/Core/HW/Memmap.cpp @@ -235,10 +235,10 @@ void Clear() bool AreMemoryBreakpointsActivated() { -#ifndef ENABLE_MEM_CHECK - return false; -#else +#ifdef ENABLE_MEM_CHECK return true; +#else + return false; #endif } diff --git a/Source/Core/Core/PowerPC/Jit64/Jit.cpp b/Source/Core/Core/PowerPC/Jit64/Jit.cpp index f01a2cd39d..4ebd633331 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit.cpp @@ -178,14 +178,14 @@ void Jit64::Init() jo.optimizeGatherPipe = true; jo.accurateSinglePrecision = true; - js.memcheck = SConfig::GetInstance().m_LocalCoreStartupParameter.bMMU; + UpdateMemoryOptions(); js.fastmemLoadStore = nullptr; js.compilerPC = 0; gpr.SetEmitter(this); fpr.SetEmitter(this); - trampolines.Init(js.memcheck ? 
TRAMPOLINE_CODE_SIZE_MMU : TRAMPOLINE_CODE_SIZE); + trampolines.Init(jo.memcheck ? TRAMPOLINE_CODE_SIZE_MMU : TRAMPOLINE_CODE_SIZE); AllocCodeSpace(CODE_SIZE); // BLR optimization has the same consequences as block linking, as well as @@ -202,7 +202,7 @@ void Jit64::Init() // important: do this *after* generating the global asm routines, because we can't use farcode in them. // it'll crash because the farcode functions get cleared on JIT clears. - farcode.Init(js.memcheck ? FARCODE_SIZE_MMU : FARCODE_SIZE); + farcode.Init(jo.memcheck ? FARCODE_SIZE_MMU : FARCODE_SIZE); code_block.m_stats = &js.st; code_block.m_gpa = &js.gpa; @@ -216,6 +216,7 @@ void Jit64::ClearCache() trampolines.ClearCodeSpace(); farcode.ClearCodeSpace(); ClearCodeSpace(); + UpdateMemoryOptions(); m_clear_cache_asap = false; } @@ -788,7 +789,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc Jit64Tables::CompileInstruction(ops[i]); - if (js.memcheck && (opinfo->flags & FL_LOADSTORE)) + if (jo.memcheck && (opinfo->flags & FL_LOADSTORE)) { // If we have a fastmem loadstore, we can omit the exception check and let fastmem handle it. FixupBranch memException; diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp index cec3523736..d1d89dcea8 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp @@ -182,14 +182,14 @@ void Jit64::lXXx(UGeckoInstruction inst) } else { - if ((inst.OPCD != 31) && gpr.R(a).IsImm() && !js.memcheck) + if ((inst.OPCD != 31) && gpr.R(a).IsImm() && !jo.memcheck) { u32 val = gpr.R(a).Imm32() + inst.SIMM_16; opAddress = Imm32(val); if (update) gpr.SetImmediate32(a, val); } - else if ((inst.OPCD == 31) && gpr.R(a).IsImm() && gpr.R(b).IsImm() && !js.memcheck) + else if ((inst.OPCD == 31) && gpr.R(a).IsImm() && gpr.R(b).IsImm() && !jo.memcheck) { u32 val = gpr.R(a).Imm32() + gpr.R(b).Imm32(); opAddress = Imm32(val); @@ -206,7 +206,7 @@ void Jit64::lXXx(UGeckoInstruction inst) offset = inst.OPCD == 31 ? gpr.R(b).SImm32() : (s32)inst.SIMM_16; // Depending on whether we have an immediate and/or update, find the optimum way to calculate // the load address. - if ((update || use_constant_offset) && !js.memcheck) + if ((update || use_constant_offset) && !jo.memcheck) { gpr.BindToRegister(a, true, update); opAddress = gpr.R(a); @@ -259,7 +259,7 @@ void Jit64::lXXx(UGeckoInstruction inst) // clobber it, then restore the value in the exception path. // TODO: no other load has to do this at the moment, since no other loads go directly to the // target registers, but if that ever changes, we need to do it there too. 
- if (js.memcheck) + if (jo.memcheck) { gpr.StoreFromRegister(d); js.revertGprLoad = d; @@ -392,7 +392,7 @@ void Jit64::stX(UGeckoInstruction inst) bool exception = WriteToConstAddress(accessSize, gpr.R(s), addr, CallerSavedRegistersInUse()); if (update) { - if (!js.memcheck || !exception) + if (!jo.memcheck || !exception) { gpr.SetImmediate32(a, addr); } @@ -445,7 +445,7 @@ void Jit64::stXx(UGeckoInstruction inst) int a = inst.RA, b = inst.RB, s = inst.RS; bool update = !!(inst.SUBOP10 & 32); bool byte_reverse = !!(inst.SUBOP10 & 512); - FALLBACK_IF(!a || (update && a == s) || (update && js.memcheck && a == b)); + FALLBACK_IF(!a || (update && a == s) || (update && jo.memcheck && a == b)); gpr.Lock(a, b, s); diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp index 81cdd4f5ce..bb5c541cdb 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStoreFloating.cpp @@ -33,7 +33,7 @@ void Jit64::lfXXX(UGeckoInstruction inst) s32 offset = 0; OpArg addr = gpr.R(a); - if (update && js.memcheck) + if (update && jo.memcheck) { addr = R(RSCRATCH2); MOV(32, addr, gpr.R(a)); @@ -66,14 +66,14 @@ void Jit64::lfXXX(UGeckoInstruction inst) } fpr.Lock(d); - if (js.memcheck && single) + if (jo.memcheck && single) { fpr.StoreFromRegister(d); js.revertFprLoad = d; } fpr.BindToRegister(d, !single); BitSet32 registersInUse = CallerSavedRegistersInUse(); - if (update && js.memcheck) + if (update && jo.memcheck) registersInUse[RSCRATCH2] = true; SafeLoadToReg(RSCRATCH, addr, single ? 32 : 64, offset, registersInUse, false); @@ -87,7 +87,7 @@ void Jit64::lfXXX(UGeckoInstruction inst) MOVQ_xmm(XMM0, R(RSCRATCH)); MOVSD(fpr.RX(d), R(XMM0)); } - if (update && js.memcheck) + if (update && jo.memcheck) MOV(32, gpr.R(a), addr); fpr.UnlockAll(); gpr.UnlockAll(); @@ -108,7 +108,7 @@ void Jit64::stfXXX(UGeckoInstruction inst) s32 imm = (s16)inst.SIMM_16; int accessSize = single ? 32 : 64; - FALLBACK_IF(update && js.memcheck && a == b); + FALLBACK_IF(update && jo.memcheck && a == b); if (single) { @@ -138,7 +138,7 @@ void Jit64::stfXXX(UGeckoInstruction inst) if (update) { - if (!js.memcheck || !exception) + if (!jo.memcheck || !exception) { gpr.SetImmediate32(a, addr); } diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp index bd52644d39..a3e288fc89 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStorePaired.cpp @@ -40,7 +40,7 @@ void Jit64::psq_stXX(UGeckoInstruction inst) X64Reg addr = gpr.RX(a); // TODO: this is kind of ugly :/ we should probably create a universal load/store address calculation // function that handles all these weird cases, e.g. how non-fastmem loadstores clobber addresses. - bool storeAddress = (update && js.memcheck) || !SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem; + bool storeAddress = (update && jo.memcheck) || !jo.fastmem; if (storeAddress) { addr = RSCRATCH2; @@ -118,7 +118,7 @@ void Jit64::psq_stXX(UGeckoInstruction inst) ADD(32, R(RSCRATCH_EXTRA), Imm32((u32)offset)); } // In memcheck mode, don't update the address until the exception check - if (update && !js.memcheck) + if (update && !jo.memcheck) MOV(32, gpr.R(a), R(RSCRATCH_EXTRA)); // Some games (e.g. Dirt 2) incorrectly set the unused bits which breaks the lookup table code. // Hence, we need to mask out the unused bits. 
The layout of the GQR register is @@ -141,7 +141,7 @@ void Jit64::psq_stXX(UGeckoInstruction inst) CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)asm_routines.pairedStoreQuantized)); } - if (update && js.memcheck) + if (update && jo.memcheck) { MemoryExceptionCheck(); if (indexed) @@ -174,7 +174,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst) s32 loadOffset = 0; gpr.BindToRegister(a, true, update); X64Reg addr = gpr.RX(a); - if (update && js.memcheck) + if (update && jo.memcheck) { addr = RSCRATCH2; MOV(32, R(addr), gpr.R(a)); @@ -209,7 +209,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst) } fpr.Lock(s); - if (js.memcheck) + if (jo.memcheck) { fpr.StoreFromRegister(s); js.revertFprLoad = s; @@ -217,7 +217,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst) fpr.BindToRegister(s, false); // Let's mirror the JitAsmCommon code and assume all non-MMU loads go to RAM. - if (!js.memcheck) + if (!jo.memcheck) { if (w) { @@ -295,7 +295,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst) ADD(32, R(RSCRATCH_EXTRA), Imm32((u32)offset)); } // In memcheck mode, don't update the address until the exception check - if (update && !js.memcheck) + if (update && !jo.memcheck) MOV(32, gpr.R(a), R(RSCRATCH_EXTRA)); MOV(32, R(RSCRATCH2), Imm32(0x3F07)); @@ -310,7 +310,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst) MemoryExceptionCheck(); CVTPS2PD(fpr.RX(s), R(XMM0)); - if (update && js.memcheck) + if (update && jo.memcheck) { if (indexed) ADD(32, gpr.R(a), gpr.R(b)); diff --git a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp index d62b68d92a..786b9e1e10 100644 --- a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp +++ b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp @@ -420,7 +420,7 @@ void CommonAsmRoutines::GenQuantizedLoads() // If we find something that actually does do this, maybe this should be changed. How // much of a performance hit would it be? 
const u8* loadPairedFloatTwo = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) { SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 64, 0, QUANTIZED_REGS_TO_SAVE, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); ROL(64, R(RSCRATCH_EXTRA), Imm8(32)); @@ -440,7 +440,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedFloatOne = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) { SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 32, 0, QUANTIZED_REGS_TO_SAVE, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); MOVD_xmm(XMM0, R(RSCRATCH_EXTRA)); @@ -461,7 +461,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedU8Two = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) { // TODO: Support not swapping in safeLoadToReg to avoid bswapping twice SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 16, 0, QUANTIZED_REGS_TO_SAVE_LOAD, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); @@ -489,7 +489,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedU8One = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 8, 0, QUANTIZED_REGS_TO_SAVE_LOAD, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); else UnsafeLoadRegToRegNoSwap(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 8, 0); // RSCRATCH_EXTRA = 0x000000xx @@ -500,7 +500,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedS8Two = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) { // TODO: Support not swapping in safeLoadToReg to avoid bswapping twice SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 16, 0, QUANTIZED_REGS_TO_SAVE_LOAD, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); @@ -528,7 +528,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedS8One = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 8, 0, QUANTIZED_REGS_TO_SAVE_LOAD, true, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); else UnsafeLoadRegToRegNoSwap(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 8, 0, true); @@ -540,7 +540,7 @@ void CommonAsmRoutines::GenQuantizedLoads() const u8* loadPairedU16Two = AlignCode4(); // TODO: Support not swapping in (un)safeLoadToReg to avoid bswapping twice - if (jit->js.memcheck) + if (jit->jo.memcheck) SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 32, 0, QUANTIZED_REGS_TO_SAVE_LOAD, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); else UnsafeLoadRegToReg(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 32, 0, false); @@ -562,7 +562,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedU16One = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 16, 0, QUANTIZED_REGS_TO_SAVE_LOAD, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); else UnsafeLoadRegToReg(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 16, 0, false); @@ -573,7 +573,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedS16Two = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 32, 0, QUANTIZED_REGS_TO_SAVE_LOAD, false, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); else UnsafeLoadRegToReg(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 32, 0, false); @@ -595,7 +595,7 @@ void CommonAsmRoutines::GenQuantizedLoads() RET(); const u8* loadPairedS16One = AlignCode4(); - if (jit->js.memcheck) + if (jit->jo.memcheck) 
SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), 16, 0, QUANTIZED_REGS_TO_SAVE_LOAD, true, SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG); else UnsafeLoadRegToReg(RSCRATCH_EXTRA, RSCRATCH_EXTRA, 16, 0, true); diff --git a/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp b/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp index 945ded0a5a..2dfd652a70 100644 --- a/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp +++ b/Source/Core/Core/PowerPC/Jit64IL/JitIL.cpp @@ -246,14 +246,14 @@ void JitIL::Init() jo.optimizeGatherPipe = true; jo.accurateSinglePrecision = false; - js.memcheck = SConfig::GetInstance().m_LocalCoreStartupParameter.bMMU; + UpdateMemoryOptions(); - trampolines.Init(js.memcheck ? TRAMPOLINE_CODE_SIZE_MMU : TRAMPOLINE_CODE_SIZE); + trampolines.Init(jo.memcheck ? TRAMPOLINE_CODE_SIZE_MMU : TRAMPOLINE_CODE_SIZE); AllocCodeSpace(CODE_SIZE); blocks.Init(); asm_routines.Init(nullptr); - farcode.Init(js.memcheck ? FARCODE_SIZE_MMU : FARCODE_SIZE); + farcode.Init(jo.memcheck ? FARCODE_SIZE_MMU : FARCODE_SIZE); code_block.m_stats = &js.st; code_block.m_gpa = &js.gpa; @@ -624,7 +624,7 @@ const u8* JitIL::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc if (!ops[i].skip) { - if (js.memcheck && (opinfo->flags & FL_USE_FPU)) + if (jo.memcheck && (opinfo->flags & FL_USE_FPU)) { ibuild.EmitFPExceptionCheck(ibuild.EmitIntConst(ops[i].address)); } @@ -644,7 +644,7 @@ const u8* JitIL::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBloc JitILTables::CompileInstruction(ops[i]); - if (js.memcheck && (opinfo->flags & FL_LOADSTORE)) + if (jo.memcheck && (opinfo->flags & FL_LOADSTORE)) { ibuild.EmitDSIExceptionCheck(ibuild.EmitIntConst(ops[i].address)); } diff --git a/Source/Core/Core/PowerPC/JitArm32/Jit.cpp b/Source/Core/Core/PowerPC/JitArm32/Jit.cpp index a54f082aec..1deccec347 100644 --- a/Source/Core/Core/PowerPC/JitArm32/Jit.cpp +++ b/Source/Core/Core/PowerPC/JitArm32/Jit.cpp @@ -33,6 +33,7 @@ void JitArm::Init() fpr.Init(this); jo.enableBlocklink = true; jo.optimizeGatherPipe = true; + UpdateMemoryOptions(); code_block.m_stats = &js.st; code_block.m_gpa = &js.gpa; @@ -45,6 +46,7 @@ void JitArm::ClearCache() { ClearCodeSpace(); blocks.Clear(); + UpdateMemoryOptions(); } void JitArm::Shutdown() @@ -467,7 +469,7 @@ const u8* JitArm::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBlo if (!ops[i].skip) { - if (js.memcheck && (opinfo->flags & FL_USE_FPU)) + if (jo.memcheck && (opinfo->flags & FL_USE_FPU)) { // Don't do this yet BKPT(0x7777); @@ -480,7 +482,7 @@ const u8* JitArm::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitBlo for (int j : ~ops[i].fprInUse) fpr.StoreFromRegister(j); - if (js.memcheck && (opinfo->flags & FL_LOADSTORE)) + if (jo.memcheck && (opinfo->flags & FL_LOADSTORE)) { // Don't do this yet BKPT(0x666); diff --git a/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStore.cpp b/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStore.cpp index 6a07ec274c..3dd95f4e0d 100644 --- a/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStore.cpp @@ -148,7 +148,7 @@ void JitArm::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, int accessSize else if (PowerPC::IsOptimizableRAMAddress(imm_addr)) { MOVI2R(rA, imm_addr); - EmitBackpatchRoutine(this, flags, SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, true, RS); + EmitBackpatchRoutine(this, flags, jo.fastmem, true, RS); } else { @@ -158,7 +158,7 @@ void JitArm::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, int accessSize } else { - 
EmitBackpatchRoutine(this, flags, SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, true, RS); + EmitBackpatchRoutine(this, flags, jo.fastmem, true, RS); } } @@ -351,7 +351,7 @@ void JitArm::SafeLoadToReg(ARMReg dest, s32 addr, s32 offsetReg, int accessSize, flags |= BackPatchInfo::FLAG_EXTEND; EmitBackpatchRoutine(this, flags, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, + jo.fastmem, true, dest); if (update) @@ -482,7 +482,7 @@ void JitArm::lmw(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(!SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(!jo.fastmem); u32 a = inst.RA; ARMReg rA = gpr.GetReg(); @@ -506,7 +506,7 @@ void JitArm::stmw(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(!SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(!jo.fastmem); u32 a = inst.RA; ARMReg rA = gpr.GetReg(); diff --git a/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStoreFloating.cpp b/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStoreFloating.cpp index 17b62e29f5..f22318e18f 100644 --- a/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStoreFloating.cpp +++ b/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStoreFloating.cpp @@ -182,7 +182,7 @@ void JitArm::lfXX(UGeckoInstruction inst) MOV(RA, addr); EmitBackpatchRoutine(this, flags, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, + jo.fastmem, !(is_immediate && PowerPC::IsOptimizableRAMAddress(imm_addr)), v0, v1); SetJumpTarget(DoNotLoad); @@ -387,7 +387,7 @@ void JitArm::stfXX(UGeckoInstruction inst) else if (PowerPC::IsOptimizableRAMAddress(imm_addr)) { MOVI2R(addr, imm_addr); - EmitBackpatchRoutine(this, flags, SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, false, v0); + EmitBackpatchRoutine(this, flags, jo.fastmem, false, v0); } else { @@ -397,7 +397,7 @@ void JitArm::stfXX(UGeckoInstruction inst) } else { - EmitBackpatchRoutine(this, flags, SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, true, v0); + EmitBackpatchRoutine(this, flags, jo.fastmem, true, v0); } } diff --git a/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStorePaired.cpp b/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStorePaired.cpp index dd68e747a4..bed9d6274f 100644 --- a/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStorePaired.cpp +++ b/Source/Core/Core/PowerPC/JitArm32/JitArm_LoadStorePaired.cpp @@ -24,7 +24,7 @@ void JitArm::psq_l(UGeckoInstruction inst) // R12 contains scale // R11 contains type // R10 is the ADDR - FALLBACK_IF(js.memcheck || !SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(jo.memcheck || !jo.fastmem); bool update = inst.OPCD == 57; s32 offset = inst.SIMM_12; @@ -76,7 +76,7 @@ void JitArm::psq_lx(UGeckoInstruction inst) // R12 contains scale // R11 contains type // R10 is the ADDR - FALLBACK_IF(js.memcheck || !SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(jo.memcheck || !jo.fastmem); bool update = inst.SUBOP10 == 38; @@ -127,7 +127,7 @@ void JitArm::psq_st(UGeckoInstruction inst) // R12 contains scale // R11 contains type // R10 is the ADDR - FALLBACK_IF(js.memcheck || !SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(jo.memcheck || !jo.fastmem); bool update = inst.OPCD == 61; s32 offset = inst.SIMM_12; @@ -179,7 +179,7 @@ void JitArm::psq_stx(UGeckoInstruction inst) // R12 contains scale // R11 contains type // R10 is the ADDR - FALLBACK_IF(js.memcheck || 
!SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(jo.memcheck || !jo.fastmem); bool update = inst.SUBOP10 == 39; diff --git a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp index 70b9d3c914..a57a5b3d7f 100644 --- a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp @@ -17,6 +17,7 @@ void JitArm64::Init() AllocCodeSpace(CODE_SIZE); jo.enableBlocklink = true; jo.optimizeGatherPipe = true; + UpdateMemoryOptions(); gpr.Init(this); fpr.Init(this); @@ -34,6 +35,7 @@ void JitArm64::ClearCache() { ClearCodeSpace(); blocks.Clear(); + UpdateMemoryOptions(); } void JitArm64::Shutdown() @@ -295,7 +297,7 @@ const u8* JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitB if (!ops[i].skip) { - if (js.memcheck && (opinfo->flags & FL_USE_FPU)) + if (jo.memcheck && (opinfo->flags & FL_USE_FPU)) { // Don't do this yet BRK(0x7777); @@ -309,7 +311,7 @@ const u8* JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer *code_buf, JitB for (int j : ~ops[i].fprInUse) fpr.StoreRegister(j); - if (js.memcheck && (opinfo->flags & FL_LOADSTORE)) + if (jo.memcheck && (opinfo->flags & FL_LOADSTORE)) { // Don't do this yet BRK(0x666); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp index d3a48ea677..fd4f7d1fc0 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp @@ -172,8 +172,8 @@ void JitArm64::SafeLoadToReg(u32 dest, s32 addr, s32 offsetReg, u32 flags, s32 o ABI_PushRegisters(regs_in_use); m_float_emit.ABI_PushRegisters(fprs_in_use, X30); EmitBackpatchRoutine(this, flags, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, + jo.fastmem, + jo.fastmem, dest_reg, XA); m_float_emit.ABI_PopRegisters(fprs_in_use, X30); ABI_PopRegisters(regs_in_use); @@ -323,8 +323,8 @@ void JitArm64::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, u32 flags, s ABI_PushRegisters(regs_in_use); m_float_emit.ABI_PushRegisters(fprs_in_use, X30); EmitBackpatchRoutine(this, flags, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, + jo.fastmem, + jo.fastmem, RS, XA); m_float_emit.ABI_PopRegisters(fprs_in_use, X30); ABI_PopRegisters(regs_in_use); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp index 04c82329a0..519fd3143d 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp @@ -196,8 +196,8 @@ void JitArm64::lfXX(UGeckoInstruction inst) ABI_PushRegisters(regs_in_use); m_float_emit.ABI_PushRegisters(fprs_in_use, X30); EmitBackpatchRoutine(this, flags, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, + jo.fastmem, + jo.fastmem, VD, XA); m_float_emit.ABI_PopRegisters(fprs_in_use, X30); ABI_PopRegisters(regs_in_use); @@ -426,8 +426,8 @@ void JitArm64::stfXX(UGeckoInstruction inst) ABI_PushRegisters(regs_in_use); m_float_emit.ABI_PushRegisters(fprs_in_use, X30); EmitBackpatchRoutine(this, flags, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, - SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem, + jo.fastmem, + jo.fastmem, V0, XA); 
m_float_emit.ABI_PopRegisters(fprs_in_use, X30); ABI_PopRegisters(regs_in_use); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp index d3b51a1b49..e6ad445bc7 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp @@ -20,7 +20,7 @@ void JitArm64::psq_l(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStorePairedOff); - FALLBACK_IF(js.memcheck || !SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(jo.memcheck || !jo.fastmem); // X30 is LR // X0 contains the scale @@ -83,7 +83,7 @@ void JitArm64::psq_st(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStorePairedOff); - FALLBACK_IF(js.memcheck || !SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem); + FALLBACK_IF(jo.memcheck || !jo.fastmem); // X30 is LR // X0 contains the scale diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp index 5039996c43..411c6e3872 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/JitBackpatch.cpp @@ -77,7 +77,7 @@ bool Jitx86Base::BackPatch(u32 emAddress, SContext* ctx) BitSet32 registersInUse = it->second; u8* exceptionHandler = nullptr; - if (jit->js.memcheck) + if (jit->jo.memcheck) { auto it2 = exceptionHandlerAtLoc.find(codePtr); if (it2 != exceptionHandlerAtLoc.end()) diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp b/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp index 9df078874b..a2077eaee9 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp @@ -82,3 +82,12 @@ bool JitBase::MergeAllowedNextInstructions(int count) } return true; } + +void JitBase::UpdateMemoryOptions() +{ + bool any_watchpoints = PowerPC::memchecks.HasAny(); + jo.fastmem = SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem && + !any_watchpoints; + jo.memcheck = SConfig::GetInstance().m_LocalCoreStartupParameter.bMMU || + any_watchpoints; +} diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBase.h b/Source/Core/Core/PowerPC/JitCommon/JitBase.h index dcd869009a..a207637482 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBase.h +++ b/Source/Core/Core/PowerPC/JitCommon/JitBase.h @@ -61,6 +61,8 @@ protected: bool enableBlocklink; bool optimizeGatherPipe; bool accurateSinglePrecision; + bool fastmem; + bool memcheck; }; struct JitState { @@ -85,7 +87,6 @@ protected: bool assumeNoPairedQuantize; bool firstFPInstructionFound; bool isLastInstruction; - bool memcheck; int skipInstructions; bool carryFlagSet; bool carryFlagInverted; @@ -109,6 +110,8 @@ protected: bool MergeAllowedNextInstructions(int count); + void UpdateMemoryOptions(); + public: // This should probably be removed from public: JitOptions jo; diff --git a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp index 58080b711f..e2f9d4555c 100644 --- a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp @@ -14,7 +14,7 @@ using namespace Gen; void EmuCodeBlock::MemoryExceptionCheck() { - if (jit->js.memcheck && !jit->js.fastmemLoadStore && !jit->js.fixupExceptionHandler) + if (jit->jo.memcheck && !jit->js.fastmemLoadStore && !jit->js.fixupExceptionHandler) { TEST(32, PPCSTATE(Exceptions), Gen::Imm32(EXCEPTION_DSI)); jit->js.exceptionHandler = 
J_CC(Gen::CC_NZ, true); @@ -254,7 +254,7 @@ FixupBranch EmuCodeBlock::CheckIfSafeAddress(OpArg reg_value, X64Reg reg_addr, B // assuming they'll never do an invalid memory access. // The slightly more complex check needed for Wii games using the space just above MEM1 isn't // implemented here yet, since there are no known working Wii MMU games to test it with. - if (jit->js.memcheck && !SConfig::GetInstance().m_LocalCoreStartupParameter.bWii) + if (jit->jo.memcheck && !SConfig::GetInstance().m_LocalCoreStartupParameter.bWii) { if (scratch == reg_addr) PUSH(scratch); @@ -276,7 +276,7 @@ FixupBranch EmuCodeBlock::CheckIfSafeAddress(OpArg reg_value, X64Reg reg_addr, B void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress, int accessSize, s32 offset, BitSet32 registersInUse, bool signExtend, int flags) { registersInUse[reg_value] = false; - if (SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem && + if (jit->jo.fastmem && !opAddress.IsImm() && !(flags & (SAFE_LOADSTORE_NO_SWAP | SAFE_LOADSTORE_NO_FASTMEM)) #ifdef ENABLE_MEM_CHECK @@ -521,7 +521,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces reg_value = FixImmediate(accessSize, reg_value); // TODO: support byte-swapped non-immediate fastmem stores - if (SConfig::GetInstance().m_LocalCoreStartupParameter.bFastmem && + if (jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && (reg_value.IsImm() || !(flags & SAFE_LOADSTORE_NO_SWAP)) #ifdef ENABLE_MEM_CHECK diff --git a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp index 46ca149d65..229a464211 100644 --- a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStore.cpp @@ -9,7 +9,7 @@ void JitILBase::lhax(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitLoadGReg(inst.RB); if (inst.RA) @@ -24,7 +24,7 @@ void JitILBase::lhaux(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitLoadGReg(inst.RB); addr = ibuild.EmitAdd(addr, ibuild.EmitLoadGReg(inst.RA)); @@ -39,7 +39,7 @@ void JitILBase::lXz(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); if (inst.RA) @@ -101,7 +101,7 @@ void JitILBase::lha(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst((s32)(s16)inst.SIMM_16); @@ -117,7 +117,7 @@ void JitILBase::lhau(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst((s32)inst.SIMM_16); @@ -133,7 +133,7 @@ void JitILBase::lXzx(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitLoadGReg(inst.RB); @@ -203,7 +203,7 @@ void JitILBase::stX(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); IREmitter::InstLoc value = 
ibuild.EmitLoadGReg(inst.RS); @@ -234,7 +234,7 @@ void JitILBase::stXx(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitLoadGReg(inst.RB); IREmitter::InstLoc value = ibuild.EmitLoadGReg(inst.RS); @@ -266,7 +266,7 @@ void JitILBase::lmw(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); @@ -285,7 +285,7 @@ void JitILBase::stmw(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); diff --git a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStoreFloating.cpp b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStoreFloating.cpp index 5401846467..9394f002a8 100644 --- a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStoreFloating.cpp +++ b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStoreFloating.cpp @@ -13,7 +13,7 @@ void JitILBase::lfs(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); @@ -28,7 +28,7 @@ void JitILBase::lfsu(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); @@ -43,7 +43,7 @@ void JitILBase::lfd(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); @@ -59,7 +59,7 @@ void JitILBase::lfdu(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); @@ -75,7 +75,7 @@ void JitILBase::stfd(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); IREmitter::InstLoc val = ibuild.EmitLoadFReg(inst.RS); @@ -93,7 +93,7 @@ void JitILBase::stfs(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_16); IREmitter::InstLoc val = ibuild.EmitLoadFReg(inst.RS); @@ -112,7 +112,7 @@ void JitILBase::stfsx(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitLoadGReg(inst.RB); IREmitter::InstLoc val = ibuild.EmitLoadFReg(inst.RS); @@ -129,7 +129,7 @@ void JitILBase::lfsx(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStoreFloatingOff); - FALLBACK_IF(js.memcheck); + FALLBACK_IF(jo.memcheck); IREmitter::InstLoc addr = ibuild.EmitLoadGReg(inst.RB), val; diff --git a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStorePaired.cpp b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStorePaired.cpp index 7e881139dc..dfcd6f8ad9 100644 --- a/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStorePaired.cpp +++ b/Source/Core/Core/PowerPC/JitILCommon/JitILBase_LoadStorePaired.cpp @@ -9,7 +9,7 @@ void 
JitILBase::psq_st(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStorePairedOff); - FALLBACK_IF(js.memcheck || inst.W); + FALLBACK_IF(jo.memcheck || inst.W); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_12); IREmitter::InstLoc val; @@ -29,7 +29,7 @@ void JitILBase::psq_l(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITLoadStorePairedOff); - FALLBACK_IF(js.memcheck || inst.W); + FALLBACK_IF(jo.memcheck || inst.W); IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_12); IREmitter::InstLoc val; From dd7ab4812b7b315c762a0224bbd12187168f3cff Mon Sep 17 00:00:00 2001 From: comex Date: Thu, 23 Apr 2015 00:41:36 -0400 Subject: [PATCH 5/7] On x86, disabling fastmem isn't enough actually. Without fastmem, the JIT code still does an inline check for RAM addresses. With watchpoints we have to disable that too. (Hardware watchpoints would avoid all the slow, but be complicated to implement and limited in number - I doubt most people debugging games care much if they run slower.) With this change and watchpoints enabled, Melee runs at no more than 40% speed, despite running at full speed without them. Oh well. Better works slowly than doesn't bloody work. Incidentally, I'm getting an unrelated crash in PowerPC::HostIsRAMAddress when shutting down a game. This code sucks. --- .../Core/Core/PowerPC/JitCommon/JitBase.cpp | 2 ++ Source/Core/Core/PowerPC/JitCommon/JitBase.h | 1 + .../Core/Core/PowerPC/JitCommon/Jit_Util.cpp | 30 +++++++++++-------- Source/Core/Core/PowerPC/MMU.cpp | 12 ++++++++ 4 files changed, 33 insertions(+), 12 deletions(-) diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp b/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp index a2077eaee9..d03df55846 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/JitBase.cpp @@ -90,4 +90,6 @@ void JitBase::UpdateMemoryOptions() !any_watchpoints; jo.memcheck = SConfig::GetInstance().m_LocalCoreStartupParameter.bMMU || any_watchpoints; + jo.alwaysUseMemFuncs = any_watchpoints; + } diff --git a/Source/Core/Core/PowerPC/JitCommon/JitBase.h b/Source/Core/Core/PowerPC/JitCommon/JitBase.h index a207637482..15b89156b6 100644 --- a/Source/Core/Core/PowerPC/JitCommon/JitBase.h +++ b/Source/Core/Core/PowerPC/JitCommon/JitBase.h @@ -63,6 +63,7 @@ protected: bool accurateSinglePrecision; bool fastmem; bool memcheck; + bool alwaysUseMemFuncs; }; struct JitState { diff --git a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp index e2f9d4555c..0ed3ddbb3a 100644 --- a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp @@ -349,14 +349,17 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress, LEA(32, RSCRATCH, MDisp(opAddress.GetSimpleReg(), offset)); } - FixupBranch slow, exit; - slow = CheckIfSafeAddress(R(reg_value), reg_addr, registersInUse, mem_mask); - UnsafeLoadToReg(reg_value, R(reg_addr), accessSize, 0, signExtend); - if (farcode.Enabled()) - SwitchToFarCode(); - else - exit = J(true); - SetJumpTarget(slow); + FixupBranch exit; + if (!jit->jo.alwaysUseMemFuncs) + { + FixupBranch slow = CheckIfSafeAddress(R(reg_value), reg_addr, registersInUse, mem_mask); + UnsafeLoadToReg(reg_value, R(reg_addr), accessSize, 0, signExtend); + if (farcode.Enabled()) + SwitchToFarCode(); + else + exit = J(true); + SetJumpTarget(slow); + } size_t rsp_alignment = (flags & SAFE_LOADSTORE_NO_PROLOG) ? 
8 : 0; ABI_PushRegistersAndAdjustStack(registersInUse, rsp_alignment); switch (accessSize) @@ -387,12 +390,15 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress, MOVZX(64, accessSize, reg_value, R(ABI_RETURN)); } - if (farcode.Enabled()) + if (!jit->jo.alwaysUseMemFuncs) { - exit = J(true); - SwitchToNearCode(); + if (farcode.Enabled()) + { + exit = J(true); + SwitchToNearCode(); + } + SetJumpTarget(exit); } - SetJumpTarget(exit); } static OpArg SwapImmediate(int accessSize, OpArg reg_value) diff --git a/Source/Core/Core/PowerPC/MMU.cpp b/Source/Core/Core/PowerPC/MMU.cpp index 308c7d482b..4fc09c7f19 100644 --- a/Source/Core/Core/PowerPC/MMU.cpp +++ b/Source/Core/Core/PowerPC/MMU.cpp @@ -629,6 +629,10 @@ std::string HostGetString(u32 address, size_t size) bool IsOptimizableRAMAddress(const u32 address) { +#ifdef ENABLE_MEM_CHECK + return false; +#endif + if (!UReg_MSR(MSR).DR) return false; @@ -752,6 +756,10 @@ void ClearCacheLine(const u32 address) u32 IsOptimizableMMIOAccess(u32 address, u32 accessSize) { +#ifdef ENABLE_MEM_CHECK + return 0; +#endif + if (!UReg_MSR(MSR).DR) return 0; @@ -767,6 +775,10 @@ u32 IsOptimizableMMIOAccess(u32 address, u32 accessSize) bool IsOptimizableGatherPipeWrite(u32 address) { +#ifdef ENABLE_MEM_CHECK + return false; +#endif + if (!UReg_MSR(MSR).DR) return false; From 2264e7b087ae796695979d2d5c75cd5eef1e03b7 Mon Sep 17 00:00:00 2001 From: comex Date: Thu, 23 Apr 2015 01:22:35 -0400 Subject: [PATCH 6/7] Use a fake exception to exit early in case of memory breakpoints. Change TMemCheck::Action to return whether to break rather than calling PPCDebugInterface::BreakNow, as this simplified the implementation; then remove said method, as that was its only caller. One "interface" method down, many to go... 
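Concretely, the watchpoint path in PowerPC::Memcheck (the MMU.cpp hunk below) now ends with:

    mc->numHits++;
    bool pause = mc->Action(&PowerPC::debug_interface, var, address, write, size, PC);
    if (pause)
    {
        CCPU::Break();
        // Fake a DSI so the JIT's existing "skip the rest of the instruction"
        // paths kick in; CheckExceptions() recognizes the extra flag and clears
        // both bits without running the real DSI handler.
        PowerPC::ppcState.Exceptions |= EXCEPTION_DSI | EXCEPTION_FAKE_MEMCHECK_HIT;
    }

Action() itself is reduced to logging the hit and returning whether to break.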
--- Source/Core/Common/BreakPoints.cpp | 6 +++--- Source/Core/Common/BreakPoints.h | 3 ++- Source/Core/Common/DebugInterface.h | 1 - .../Core/Core/Debugger/PPCDebugInterface.cpp | 5 ----- Source/Core/Core/Debugger/PPCDebugInterface.h | 1 - Source/Core/Core/HW/CPU.cpp | 10 +++++++++- Source/Core/Core/PowerPC/Gekko.h | 4 +++- Source/Core/Core/PowerPC/MMU.cpp | 20 ++++++++++++++++++- Source/Core/Core/PowerPC/PowerPC.cpp | 6 ++++++ 9 files changed, 42 insertions(+), 14 deletions(-) diff --git a/Source/Core/Common/BreakPoints.cpp b/Source/Core/Common/BreakPoints.cpp index 2ee89389d3..277b956e61 100644 --- a/Source/Core/Common/BreakPoints.cpp +++ b/Source/Core/Common/BreakPoints.cpp @@ -207,7 +207,7 @@ TMemCheck *MemChecks::GetMemCheck(u32 address) return nullptr; } -void TMemCheck::Action(DebugInterface *debug_interface, u32 iValue, u32 addr, bool write, int size, u32 pc) +bool TMemCheck::Action(DebugInterface *debug_interface, u32 iValue, u32 addr, bool write, int size, u32 pc) { if ((write && OnWrite) || (!write && OnRead)) { @@ -220,9 +220,9 @@ void TMemCheck::Action(DebugInterface *debug_interface, u32 iValue, u32 addr, bo ); } - if (Break) - debug_interface->BreakNow(); + return true; } + return false; } diff --git a/Source/Core/Common/BreakPoints.h b/Source/Core/Common/BreakPoints.h index 370f4c96e0..91dd74ae20 100644 --- a/Source/Core/Common/BreakPoints.h +++ b/Source/Core/Common/BreakPoints.h @@ -40,7 +40,8 @@ struct TMemCheck u32 numHits; - void Action(DebugInterface *dbg_interface, u32 _iValue, u32 addr, + // returns whether to break + bool Action(DebugInterface *dbg_interface, u32 _iValue, u32 addr, bool write, int size, u32 pc); }; diff --git a/Source/Core/Common/DebugInterface.h b/Source/Core/Common/DebugInterface.h index a21921d496..9e30870eff 100644 --- a/Source/Core/Common/DebugInterface.h +++ b/Source/Core/Common/DebugInterface.h @@ -30,7 +30,6 @@ public: virtual void SetPC(unsigned int /*address*/) {} virtual void Step() {} virtual void RunToBreakpoint() {} - virtual void BreakNow() {} virtual void InsertBLR(unsigned int /*address*/, unsigned int /*value*/) {} virtual int GetColor(unsigned int /*address*/){return 0xFFFFFFFF;} virtual std::string GetDescription(unsigned int /*address*/) = 0; diff --git a/Source/Core/Core/Debugger/PPCDebugInterface.cpp b/Source/Core/Core/Debugger/PPCDebugInterface.cpp index 2a70213d35..81ffa1bd0e 100644 --- a/Source/Core/Core/Debugger/PPCDebugInterface.cpp +++ b/Source/Core/Core/Debugger/PPCDebugInterface.cpp @@ -169,11 +169,6 @@ void PPCDebugInterface::InsertBLR(unsigned int address, unsigned int value) PowerPC::HostWrite_U32(value, address); } -void PPCDebugInterface::BreakNow() -{ - CCPU::Break(); -} - // ======================================================= // Separate the blocks with colors. 
diff --git a/Source/Core/Core/Debugger/PPCDebugInterface.h b/Source/Core/Core/Debugger/PPCDebugInterface.h index 0cecef0ea6..2a4753cb96 100644 --- a/Source/Core/Core/Debugger/PPCDebugInterface.h +++ b/Source/Core/Core/Debugger/PPCDebugInterface.h @@ -39,7 +39,6 @@ public: virtual unsigned int GetPC() override; virtual void SetPC(unsigned int address) override; virtual void Step() override {} - virtual void BreakNow() override; virtual void RunToBreakpoint() override; virtual void InsertBLR(unsigned int address, unsigned int value) override; virtual int GetColor(unsigned int address) override; diff --git a/Source/Core/Core/HW/CPU.cpp b/Source/Core/Core/HW/CPU.cpp index 953a4c9633..f583cc3dfe 100644 --- a/Source/Core/Core/HW/CPU.cpp +++ b/Source/Core/Core/HW/CPU.cpp @@ -13,6 +13,7 @@ #include "Core/Movie.h" #include "Core/HW/CPU.h" #include "Core/HW/DSP.h" +#include "Core/HW/Memmap.h" #include "Core/PowerPC/PowerPC.h" #include "VideoCommon/VideoBackendBase.h" @@ -117,7 +118,14 @@ void CCPU::EnableStepping(const bool _bStepping) { // SingleStep so that the "continue", "step over" and "step out" debugger functions // work when the PC is at a breakpoint at the beginning of the block - if (PowerPC::breakpoints.IsAddressBreakPoint(PC) && PowerPC::GetMode() != PowerPC::MODE_INTERPRETER) + // If watchpoints are enabled, any instruction could be a breakpoint. + bool could_be_bp; +#ifdef ENABLE_MEM_CHECK + could_be_bp = true; +#else + could_be_bp = PowerPC::breakpoints.IsAddressBreakPoint(PC); +#endif + if (could_be_bp && PowerPC::GetMode() != PowerPC::MODE_INTERPRETER) { PowerPC::CoreMode oldMode = PowerPC::GetMode(); PowerPC::SetMode(PowerPC::MODE_INTERPRETER); diff --git a/Source/Core/Core/PowerPC/Gekko.h b/Source/Core/Core/PowerPC/Gekko.h index 14f4083af6..4b8921c3e4 100644 --- a/Source/Core/Core/PowerPC/Gekko.h +++ b/Source/Core/Core/PowerPC/Gekko.h @@ -831,7 +831,9 @@ enum EXCEPTION_ALIGNMENT = 0x00000020, EXCEPTION_FPU_UNAVAILABLE = 0x00000040, EXCEPTION_PROGRAM = 0x00000080, - EXCEPTION_PERFORMANCE_MONITOR = 0x00000100 + EXCEPTION_PERFORMANCE_MONITOR = 0x00000100, + + EXCEPTION_FAKE_MEMCHECK_HIT = 0x00000200, }; inline s32 SignExt16(s16 x) {return (s32)(s16)x;} diff --git a/Source/Core/Core/PowerPC/MMU.cpp b/Source/Core/Core/PowerPC/MMU.cpp index 4fc09c7f19..b283354e72 100644 --- a/Source/Core/Core/PowerPC/MMU.cpp +++ b/Source/Core/Core/PowerPC/MMU.cpp @@ -21,6 +21,7 @@ #include "Core/ConfigManager.h" #include "Core/Core.h" +#include "Core/HW/CPU.h" #include "Core/HW/GPFifo.h" #include "Core/HW/Memmap.h" #include "Core/HW/MMIO.h" @@ -455,8 +456,25 @@ static __forceinline void Memcheck(u32 address, u32 var, bool write, int size) TMemCheck *mc = PowerPC::memchecks.GetMemCheck(address); if (mc) { + if (CCPU::IsStepping()) + { + // Disable when stepping so that resume works. + return; + } mc->numHits++; - mc->Action(&PowerPC::debug_interface, var, address, write, size, PC); + bool pause = mc->Action(&PowerPC::debug_interface, var, address, write, size, PC); + if (pause) + { + CCPU::Break(); + // Fake a DSI so that all the code that tests for it in order to skip + // the rest of the instruction will apply. (This means that + // watchpoints will stop the emulator before the offending load/store, + // not after like GDB does, but that's better anyway. Just need to + // make sure resuming after that works.) + // It doesn't matter if ReadFromHardware triggers its own DSI because + // we'll take it after resuming. 
+ PowerPC::ppcState.Exceptions |= EXCEPTION_DSI | EXCEPTION_FAKE_MEMCHECK_HIT; + } } #endif } diff --git a/Source/Core/Core/PowerPC/PowerPC.cpp b/Source/Core/Core/PowerPC/PowerPC.cpp index ce69bd7d01..b9ec4ca085 100644 --- a/Source/Core/Core/PowerPC/PowerPC.cpp +++ b/Source/Core/Core/PowerPC/PowerPC.cpp @@ -375,6 +375,12 @@ void CheckExceptions() INFO_LOG(POWERPC, "EXCEPTION_FPU_UNAVAILABLE"); ppcState.Exceptions &= ~EXCEPTION_FPU_UNAVAILABLE; } +#ifdef ENABLE_MEM_CHECK + else if (exceptions & EXCEPTION_FAKE_MEMCHECK_HIT) + { + ppcState.Exceptions &= ~EXCEPTION_DSI & ~EXCEPTION_FAKE_MEMCHECK_HIT; + } +#endif else if (exceptions & EXCEPTION_DSI) { SRR0 = PC; From 132e1068ce7b2cc39d52facff9a33d0f3a582ae8 Mon Sep 17 00:00:00 2001 From: comex Date: Thu, 23 Apr 2015 02:47:15 -0400 Subject: [PATCH 7/7] Remove checks that disable fastmem if debugging and ENABLE_MEM_CHECK are enabled. They weren't sufficient and are made redundant by previous commits; they also (on master) caused breakage due to Jit64::psq_stXX assuming writes would be fastmem and not clobber a register under certain conditions. That really needs to be refactored, but for now, this works. --- Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp index 0ed3ddbb3a..a0a86faf50 100644 --- a/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp +++ b/Source/Core/Core/PowerPC/JitCommon/Jit_Util.cpp @@ -278,11 +278,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress, registersInUse[reg_value] = false; if (jit->jo.fastmem && !opAddress.IsImm() && - !(flags & (SAFE_LOADSTORE_NO_SWAP | SAFE_LOADSTORE_NO_FASTMEM)) -#ifdef ENABLE_MEM_CHECK - && !SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableDebugging -#endif - ) + !(flags & (SAFE_LOADSTORE_NO_SWAP | SAFE_LOADSTORE_NO_FASTMEM))) { u8 *mov = UnsafeLoadToReg(reg_value, opAddress, accessSize, offset, signExtend); @@ -529,11 +525,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces // TODO: support byte-swapped non-immediate fastmem stores if (jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && - (reg_value.IsImm() || !(flags & SAFE_LOADSTORE_NO_SWAP)) -#ifdef ENABLE_MEM_CHECK - && !SConfig::GetInstance().m_LocalCoreStartupParameter.bEnableDebugging -#endif - ) + (reg_value.IsImm() || !(flags & SAFE_LOADSTORE_NO_SWAP))) { const u8* backpatchStart = GetCodePtr(); u8* mov = UnsafeWriteRegToReg(reg_value, reg_addr, accessSize, offset, !(flags & SAFE_LOADSTORE_NO_SWAP));