diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b7600fd75..f224c63d8 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -56,6 +56,10 @@ add_library(common
minizip_helpers.h
null_audio_stream.cpp
null_audio_stream.h
+ memory_arena.cpp
+ memory_arena.h
+ page_fault_handler.cpp
+ page_fault_handler.h
rectangle.h
progress_callback.cpp
progress_callback.h
@@ -180,3 +184,8 @@ if(APPLE AND NOT BUILD_LIBRETRO_CORE)
gl/context_agl.h
)
endif()
+
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+ # We need -lrt for shm_open/shm_unlink
+ target_link_libraries(common PRIVATE rt)
+endif()
diff --git a/src/common/common.vcxproj b/src/common/common.vcxproj
index f81abf94c..fd08eb5c1 100644
--- a/src/common/common.vcxproj
+++ b/src/common/common.vcxproj
@@ -70,6 +70,8 @@
+    <ClInclude Include="memory_arena.h" />
+    <ClInclude Include="page_fault_handler.h" />
@@ -130,6 +132,8 @@
+    <ClCompile Include="memory_arena.cpp" />
+    <ClCompile Include="page_fault_handler.cpp" />
@@ -543,4 +547,4 @@
-</Project>
\ No newline at end of file
+</Project>
diff --git a/src/common/common.vcxproj.filters b/src/common/common.vcxproj.filters
index 2903f3d34..62268c8f4 100644
--- a/src/common/common.vcxproj.filters
+++ b/src/common/common.vcxproj.filters
@@ -103,6 +103,8 @@
+    <ClCompile Include="memory_arena.cpp" />
+    <ClCompile Include="page_fault_handler.cpp" />
@@ -198,6 +200,8 @@
+    <ClInclude Include="memory_arena.h" />
+    <ClInclude Include="page_fault_handler.h" />
@@ -213,4 +217,4 @@
      <UniqueIdentifier>{642ff5eb-af39-4aab-a42f-6eb8188a11d7}</UniqueIdentifier>
-</Project>
\ No newline at end of file
+</Project>
diff --git a/src/common/memory_arena.cpp b/src/common/memory_arena.cpp
new file mode 100644
index 000000000..cdb52ff11
--- /dev/null
+++ b/src/common/memory_arena.cpp
@@ -0,0 +1,213 @@
+#include "memory_arena.h"
+#include "common/assert.h"
+#include "common/log.h"
+#include "common/string_util.h"
+Log_SetChannel(Common::MemoryArena);
+
+#if defined(WIN32)
+#include "common/windows_headers.h"
+#elif defined(__linux__) || defined(__ANDROID__)
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+namespace Common {
+
+MemoryArena::MemoryArena() = default;
+
+MemoryArena::~MemoryArena()
+{
+#if defined(WIN32)
+ if (m_file_handle)
+ CloseHandle(m_file_handle);
+#elif defined(__linux__)
+ if (m_shmem_fd >= 0)
+ close(m_shmem_fd);
+#endif
+}
+
+void* MemoryArena::FindBaseAddressForMapping(size_t size)
+{
+ void* base_address;
+#if defined(WIN32)
+ base_address = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_READWRITE);
+ if (base_address)
+ VirtualFree(base_address, 0, MEM_RELEASE);
+#elif defined(__linux__)
+ base_address = mmap(nullptr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ if (base_address != MAP_FAILED)
+ munmap(base_address, size);
+ else
+ base_address = nullptr;
+#elif defined(__ANDROID__)
+ base_address = mmap(nullptr, size, PROT_NONE, MAP_ANON | MAP_SHARED, -1, 0);
+ if (base_address != MAP_FAILED)
+ munmap(base_address, size);
+ else
+ base_address = nullptr;
+#else
+ base_address = nullptr;
+#endif
+
+ if (!base_address)
+ {
+ Log_ErrorPrintf("Failed to get base address for memory mapping of size %zu", size);
+ return nullptr;
+ }
+
+ return base_address;
+}
+
+bool MemoryArena::Create(size_t size, bool writable, bool executable)
+{
+#if defined(WIN32)
+ const std::string file_mapping_name =
+ StringUtil::StdStringFromFormat("common_memory_arena_%zu_%u", size, GetCurrentProcessId());
+
+ const DWORD protect = (writable ? (executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE) : PAGE_READONLY);
+ m_file_handle = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, protect, Truncate32(size >> 32), Truncate32(size),
+ file_mapping_name.c_str());
+ if (!m_file_handle)
+ {
+ Log_ErrorPrintf("CreateFileMapping failed: %u", GetLastError());
+ return false;
+ }
+
+ return true;
+#elif defined(__linux__)
+ const std::string file_mapping_name =
+ StringUtil::StdStringFromFormat("common_memory_arena_%zu_%u", size, static_cast<unsigned>(getpid()));
+ m_shmem_fd = shm_open(file_mapping_name.c_str(), O_CREAT | O_EXCL | (writable ? O_RDWR : O_RDONLY), 0600);
+ if (m_shmem_fd < 0)
+ {
+ Log_ErrorPrintf("shm_open failed: %d", errno);
+ return false;
+ }
+
+ // we're not going to be opening this mapping in other processes, so remove the file
+ shm_unlink(file_mapping_name.c_str());
+
+ // ensure it's the correct size
+ if (ftruncate64(m_shmem_fd, static_cast<off64_t>(size)) < 0)
+ {
+ Log_ErrorPrintf("ftruncate64(%zu) failed: %d", size, errno);
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+std::optional<MemoryArena::View> MemoryArena::CreateView(size_t offset, size_t size, bool writable, bool executable,
+ void* fixed_address)
+{
+ void* base_pointer = CreateViewPtr(offset, size, writable, executable, fixed_address);
+ if (!base_pointer)
+ return std::nullopt;
+
+ return View(this, base_pointer, offset, size, writable);
+}
+
+void* MemoryArena::CreateViewPtr(size_t offset, size_t size, bool writable, bool executable,
+ void* fixed_address /*= nullptr*/)
+{
+ void* base_pointer;
+#if defined(WIN32)
+ const DWORD desired_access = FILE_MAP_READ | (writable ? FILE_MAP_WRITE : 0) | (executable ? FILE_MAP_EXECUTE : 0);
+ base_pointer =
+ MapViewOfFileEx(m_file_handle, desired_access, Truncate32(offset >> 32), Truncate32(offset), size, fixed_address);
+ if (!base_pointer)
+ return nullptr;
+#elif defined(__linux__)
+ const int flags = (fixed_address != nullptr) ? (MAP_SHARED | MAP_FIXED) : MAP_SHARED;
+ const int prot = PROT_READ | (writable ? PROT_WRITE : 0) | (executable ? PROT_EXEC : 0);
+ base_pointer = mmap64(fixed_address, size, prot, flags, m_shmem_fd, static_cast<off64_t>(offset));
+ if (base_pointer == reinterpret_cast<void*>(-1))
+ return nullptr;
+#else
+ return nullptr;
+#endif
+
+ m_num_views.fetch_add(1);
+ return base_pointer;
+}
+
+bool MemoryArena::FlushViewPtr(void* address, size_t size)
+{
+#if defined(WIN32)
+ return FlushViewOfFile(address, size);
+#elif defined(__linux__)
+ return (msync(address, size, 0) >= 0);
+#else
+ return false;
+#endif
+}
+
+bool MemoryArena::ReleaseViewPtr(void* address, size_t size)
+{
+ bool result;
+#if defined(WIN32)
+ result = static_cast<bool>(UnmapViewOfFile(address));
+#elif defined(__linux__)
+ result = (munmap(address, size) >= 0);
+#else
+ result = false;
+#endif
+
+ if (!result)
+ {
+ Log_ErrorPrintf("Failed to unmap previously-created view at %p", address);
+ return false;
+ }
+
+ const size_t prev_count = m_num_views.fetch_sub(1);
+ Assert(prev_count > 0);
+ return true;
+}
+
+bool MemoryArena::SetPageProtection(void* address, size_t length, bool readable, bool writable, bool executable)
+{
+#if defined(WIN32)
+ static constexpr DWORD protection_table[2][2][2] = {
+ {{PAGE_NOACCESS, PAGE_EXECUTE}, {PAGE_WRITECOPY, PAGE_EXECUTE_WRITECOPY}},
+ {{PAGE_READONLY, PAGE_EXECUTE_READ}, {PAGE_READWRITE, PAGE_EXECUTE_READWRITE}}};
+
+ DWORD old_protect;
+ return static_cast<bool>(
+ VirtualProtect(address, length, protection_table[readable][writable][executable], &old_protect));
+#elif defined(__linux__) || defined(__ANDROID__)
+ const int prot = (readable ? PROT_READ : 0) | (writable ? PROT_WRITE : 0) | (executable ? PROT_EXEC : 0);
+ return (mprotect(address, length, prot) >= 0);
+#else
+ return false;
+#endif
+}
+
+MemoryArena::View::View(MemoryArena* parent, void* base_pointer, size_t arena_offset, size_t mapping_size,
+ bool writable)
+ : m_parent(parent), m_base_pointer(base_pointer), m_arena_offset(arena_offset), m_mapping_size(mapping_size),
+ m_writable(writable)
+{
+}
+
+MemoryArena::View::View(View&& view)
+ : m_parent(view.m_parent), m_base_pointer(view.m_base_pointer), m_arena_offset(view.m_arena_offset),
+ m_mapping_size(view.m_mapping_size), m_writable(view.m_writable)
+{
+ view.m_parent = nullptr;
+ view.m_base_pointer = nullptr;
+ view.m_arena_offset = 0;
+ view.m_mapping_size = 0;
+}
+
+MemoryArena::View::~View()
+{
+ if (m_parent)
+ {
+ if (m_writable && !m_parent->FlushViewPtr(m_base_pointer, m_mapping_size))
+ Panic("Failed to flush previously-created view");
+ if (!m_parent->ReleaseViewPtr(m_base_pointer, m_mapping_size))
+ Panic("Failed to unmap previously-created view");
+ }
+}
+} // namespace Common
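
Note on the pattern above: FindBaseAddressForMapping() reserves a free span, immediately releases it, and the caller then re-maps arena views at that address via MAP_FIXED / MapViewOfFileEx. A minimal sketch of that intended usage, assuming a 64-bit host (the helper name MapTwoMirrors is illustrative, not part of this patch):

```cpp
#include "common/memory_arena.h"

// Place two views of the same arena pages back-to-back at a probed address.
bool MapTwoMirrors(Common::MemoryArena& arena, size_t view_size)
{
  // Probe for a free span large enough for both mirrors. The span is free
  // again once this returns, so another allocation could race us for it.
  u8* base = static_cast<u8*>(Common::MemoryArena::FindBaseAddressForMapping(view_size * 2));
  if (!base)
    return false;

  // fixed_address routes to MAP_FIXED on Linux / MapViewOfFileEx on Windows.
  void* lo = arena.CreateViewPtr(0, view_size, true, false, base);
  void* hi = arena.CreateViewPtr(0, view_size, true, false, base + view_size);
  return lo && hi;
}
```
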
diff --git a/src/common/memory_arena.h b/src/common/memory_arena.h
new file mode 100644
index 000000000..8e175bd47
--- /dev/null
+++ b/src/common/memory_arena.h
@@ -0,0 +1,58 @@
+#pragma once
+#include "types.h"
+#include <atomic>
+#include <optional>
+
+namespace Common {
+class MemoryArena
+{
+public:
+ class View
+ {
+ public:
+ View(MemoryArena* parent, void* base_pointer, size_t arena_offset, size_t mapping_size, bool writable);
+ View(View&& view);
+ ~View();
+
+ void* GetBasePointer() const { return m_base_pointer; }
+ size_t GetArenaOffset() const { return m_arena_offset; }
+ size_t GetMappingSize() const { return m_mapping_size; }
+ bool IsWritable() const { return m_writable; }
+
+ private:
+ MemoryArena* m_parent;
+ void* m_base_pointer;
+ size_t m_arena_offset;
+ size_t m_mapping_size;
+ bool m_writable;
+ };
+
+ MemoryArena();
+ ~MemoryArena();
+
+ static void* FindBaseAddressForMapping(size_t size);
+
+ bool Create(size_t size, bool writable, bool executable);
+
+ std::optional<View> CreateView(size_t offset, size_t size, bool writable, bool executable,
+ void* fixed_address = nullptr);
+
+ void* CreateViewPtr(size_t offset, size_t size, bool writable, bool executable, void* fixed_address = nullptr);
+ bool FlushViewPtr(void* address, size_t size);
+ bool ReleaseViewPtr(void* address, size_t size);
+
+ static bool SetPageProtection(void* address, size_t length, bool readable, bool writable, bool executable);
+
+private:
+#if defined(WIN32)
+ void* m_file_handle = nullptr;
+#elif defined(__linux__)
+ int m_shmem_fd = -1;
+#endif
+
+ std::atomic_size_t m_num_views{0};
+ size_t m_size = 0;
+ bool m_writable = false;
+ bool m_executable = false;
+};
+} // namespace Common
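
As a usage reference for the interface above, a hedged sketch: two views created at the same arena offset alias the same backing pages, which is the property the fastmem mappings in bus.cpp rely on.

```cpp
#include "common/memory_arena.h"
#include <cstdio>

int main()
{
  Common::MemoryArena arena;
  if (!arena.Create(0x10000, true, false)) // 64K arena, writable, not executable
    return 1;

  // Two independent windows onto the same 64K of the arena.
  u8* a = static_cast<u8*>(arena.CreateViewPtr(0, 0x10000, true, false));
  u8* b = static_cast<u8*>(arena.CreateViewPtr(0, 0x10000, true, false));
  if (!a || !b)
    return 1;

  a[0] = 0x42;
  std::printf("0x%02X\n", b[0]); // prints 0x42: both views share pages

  arena.ReleaseViewPtr(a, 0x10000);
  arena.ReleaseViewPtr(b, 0x10000);
  return 0;
}
```
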
diff --git a/src/common/page_fault_handler.cpp b/src/common/page_fault_handler.cpp
new file mode 100644
index 000000000..448783475
--- /dev/null
+++ b/src/common/page_fault_handler.cpp
@@ -0,0 +1,186 @@
+#include "page_fault_handler.h"
+#include "common/log.h"
+#include <algorithm>
+#include <cstring>
+#include <mutex>
+#include <vector>
+Log_SetChannel(Common::PageFaultHandler);
+
+#if defined(WIN32)
+#include "common/windows_headers.h"
+#elif defined(__linux__) || defined(__ANDROID__)
+#include <signal.h>
+#include <ucontext.h>
+#include <unistd.h>
+#define USE_SIGSEGV 1
+#endif
+
+namespace Common::PageFaultHandler {
+
+struct RegisteredHandler
+{
+ void* owner;
+ Callback callback;
+};
+static std::vector<RegisteredHandler> m_handlers;
+static std::mutex m_handler_lock;
+static thread_local bool s_in_handler;
+
+#if defined(WIN32)
+static PVOID s_veh_handle;
+
+static LONG ExceptionHandler(PEXCEPTION_POINTERS exi)
+{
+ if (exi->ExceptionRecord->ExceptionCode != EXCEPTION_ACCESS_VIOLATION || s_in_handler)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ s_in_handler = true;
+
+ void* const exception_pc = reinterpret_cast<void*>(exi->ContextRecord->Rip);
+ void* const exception_address = reinterpret_cast<void*>(exi->ExceptionRecord->ExceptionInformation[1]);
+ bool const is_write = exi->ExceptionRecord->ExceptionInformation[0] == 1;
+
+ std::lock_guard guard(m_handler_lock);
+ for (const RegisteredHandler& rh : m_handlers)
+ {
+ if (rh.callback(exception_pc, exception_address, is_write) == HandlerResult::ContinueExecution)
+ {
+ s_in_handler = false;
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ }
+
+ s_in_handler = false;
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+#elif defined(USE_SIGSEGV)
+
+static struct sigaction s_old_sigsegv_action;
+
+static void SIGSEGVHandler(int sig, siginfo_t* info, void* ctx)
+{
+ if ((info->si_code != SEGV_MAPERR && info->si_code != SEGV_ACCERR) || s_in_handler)
+ return;
+
+ s_in_handler = true;
+
+ void* const exception_address = info->si_addr;
+
+#if defined(__x86_64__)
+ void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_RIP]);
+ const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_ERR] & 2) != 0;
+#elif defined(__aarch64__)
+ void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.pc);
+ const bool is_write = false;
+#else
+ void* const exception_pc = nullptr;
+ const bool is_write = false;
+#endif
+
+ std::lock_guard guard(m_handler_lock);
+ for (const RegisteredHandler& rh : m_handlers)
+ {
+ if (rh.callback(exception_pc, exception_address, is_write) == HandlerResult::ContinueExecution)
+ {
+ s_in_handler = false;
+ return;
+ }
+ }
+
+ s_in_handler = false;
+
+ // call old signal handler
+ if (s_old_sigsegv_action.sa_flags & SA_SIGINFO)
+ s_old_sigsegv_action.sa_sigaction(sig, info, ctx);
+ else if (s_old_sigsegv_action.sa_handler == SIG_DFL)
+ signal(sig, SIG_DFL);
+ else if (s_old_sigsegv_action.sa_handler == SIG_IGN)
+ return;
+ else
+ s_old_sigsegv_action.sa_handler(sig);
+}
+
+#endif
+
+bool InstallHandler(void* owner, Callback callback)
+{
+ bool was_empty;
+ {
+ std::lock_guard guard(m_handler_lock);
+ if (std::find_if(m_handlers.begin(), m_handlers.end(),
+ [owner](const RegisteredHandler& rh) { return rh.owner == owner; }) != m_handlers.end())
+ {
+ return false;
+ }
+
+ was_empty = m_handlers.empty();
+ m_handlers.push_back(RegisteredHandler{owner, std::move(callback)});
+ }
+
+ if (was_empty)
+ {
+#if defined(WIN32)
+ s_veh_handle = AddVectoredExceptionHandler(1, ExceptionHandler);
+ if (!s_veh_handle)
+ {
+ Log_ErrorPrint("Failed to add vectored exception handler");
+ return false;
+ }
+#elif defined(USE_SIGSEGV)
+#if 0
+ // TODO: Is this needed?
+ stack_t signal_stack = {};
+ signal_stack.ss_sp = malloc(SIGSTKSZ);
+ signal_stack.ss_size = SIGSTKSZ;
+ if (sigaltstack(&signal_stack, nullptr))
+ {
+ Log_ErrorPrintf("signaltstack() failed: %d", errno);
+ return false;
+ }
+#endif
+
+ struct sigaction sa = {};
+ sa.sa_sigaction = SIGSEGVHandler;
+ sa.sa_flags = SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(SIGSEGV, &sa, &s_old_sigsegv_action) < 0)
+ {
+ Log_ErrorPrintf("sigaction() failed: %d", errno);
+ return false;
+ }
+#else
+ return false;
+#endif
+ }
+
+ return true;
+}
+
+bool RemoveHandler(void* owner)
+{
+ std::lock_guard guard(m_handler_lock);
+ auto it = std::find_if(m_handlers.begin(), m_handlers.end(),
+ [owner](const RegisteredHandler& rh) { return rh.owner == owner; });
+ if (it == m_handlers.end())
+ return false;
+
+ m_handlers.erase(it);
+
+ if (m_handlers.empty())
+ {
+#if defined(WIN32)
+ RemoveVectoredExceptionHandler(s_veh_handle);
+ s_veh_handle = nullptr;
+#elif defined(USE_SIGSEGV)
+ // restore old signal handler
+ if (sigaction(SIGSEGV, &s_old_sigsegv_action, nullptr) < 0)
+ {
+ Log_ErrorPrintf("sigaction() failed: %d", errno);
+ return false;
+ }
+
+ s_old_sigsegv_action = {};
+#endif
+ }
+
+ return true;
+}
+
+} // namespace Common::PageFaultHandler
diff --git a/src/common/page_fault_handler.h b/src/common/page_fault_handler.h
new file mode 100644
index 000000000..67ef38cbd
--- /dev/null
+++ b/src/common/page_fault_handler.h
@@ -0,0 +1,17 @@
+#pragma once
+#include "types.h"
+
+namespace Common::PageFaultHandler {
+enum class HandlerResult
+{
+ ContinueExecution,
+ ExecuteNextHandler,
+};
+
+using Callback = HandlerResult(*)(void* exception_pc, void* fault_address, bool is_write);
+using Handle = void*;
+
+bool InstallHandler(void* owner, Callback callback);
+bool RemoveHandler(void* owner);
+
+} // namespace Common::PageFaultHandler
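
A short sketch of the registration contract this header defines: the owner pointer is only an identity key for RemoveHandler(), and the callback runs from the fault context (vectored exception handler or SIGSEGV), so it should restrict itself to inspecting the fault and patching mappings or code. The names below are illustrative, not part of the patch.

```cpp
#include "common/page_fault_handler.h"

namespace {
char s_owner; // any stable address works as the owner key

Common::PageFaultHandler::HandlerResult OnFault(void* exception_pc, void* fault_address, bool is_write)
{
  // If the fault belongs to us: fix the mapping or backpatch the code at
  // exception_pc, then return ContinueExecution to retry the instruction.
  // Otherwise defer to the next handler / the default signal action.
  return Common::PageFaultHandler::HandlerResult::ExecuteNextHandler;
}
} // namespace

void RegisterFaultHandler()
{
  Common::PageFaultHandler::InstallHandler(&s_owner, &OnFault);
}

void UnregisterFaultHandler()
{
  Common::PageFaultHandler::RemoveHandler(&s_owner);
}
```
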
diff --git a/src/core/bus.cpp b/src/core/bus.cpp
index 6954994f4..653ac15e4 100644
--- a/src/core/bus.cpp
+++ b/src/core/bus.cpp
@@ -10,6 +10,7 @@
#include "cpu_disasm.h"
#include "dma.h"
#include "gpu.h"
+#include "host_interface.h"
#include "interrupt_controller.h"
#include "mdec.h"
#include "pad.h"
@@ -69,8 +70,9 @@ union MEMCTRL
};
std::bitset<CPU_CODE_CACHE_PAGE_COUNT> m_ram_code_bits{};
-u8 g_ram[RAM_SIZE]{}; // 2MB RAM
-u8 g_bios[BIOS_SIZE]{}; // 512K BIOS ROM
+u8* g_ram = nullptr; // 2MB RAM
+u8* g_bios = nullptr; // 512K BIOS ROM
+u8* g_scratchpad = nullptr;
static std::array<TickCount, 3> m_exp1_access_time = {};
static std::array<TickCount, 3> m_exp2_access_time = {};
@@ -85,9 +87,19 @@ static u32 m_ram_size_reg = 0;
static std::string m_tty_line_buffer;
+static Common::MemoryArena m_memory_arena;
+static u8* m_fastmem_base = nullptr;
+static std::vector<Common::MemoryArena::View> m_fastmem_ram_views;
+static std::vector<Common::MemoryArena::View> m_fastmem_scratchpad_views;
+static std::vector<Common::MemoryArena::View> m_fastmem_bios_views;
+
static std::tuple<TickCount, TickCount, TickCount> CalculateMemoryTiming(MEMDELAY mem_delay, COMDELAY common_delay);
static void RecalculateMemoryTimings();
+static void SetCodePageFastmemProtection(u32 page_index, bool writable);
+static bool AllocateMemory();
+static void UnmapFastmemViews();
+
#define FIXUP_WORD_READ_OFFSET(offset) ((offset) & ~u32(3))
#define FIXUP_WORD_READ_VALUE(offset, value) ((value) >> (((offset)&u32(3)) * 8u))
#define FIXUP_HALFWORD_READ_OFFSET(offset) ((offset) & ~u32(1))
@@ -103,19 +115,35 @@ ALWAYS_INLINE static void FixupUnalignedWordAccessW32(u32& offset, u32& value)
value <<= byte_offset * 8;
}
-void Initialize()
+bool Initialize()
{
+ if (!AllocateMemory())
+ {
+ g_host_interface->ReportError("Failed to allocate memory");
+ return false;
+ }
+
Reset();
+ return true;
}
void Shutdown()
{
- //
+ UnmapFastmemViews();
+ if (g_ram)
+ m_memory_arena.ReleaseViewPtr(g_ram, RAM_SIZE);
+ if (g_bios)
+ m_memory_arena.ReleaseViewPtr(g_bios, BIOS_SIZE);
+ if (g_scratchpad)
+ m_memory_arena.ReleaseViewPtr(g_scratchpad, FASTMEM_SCRATCHPAD_SIZE);
+
+ CPU::g_state.fastmem_base = nullptr;
}
void Reset()
{
- std::memset(g_ram, 0, sizeof(g_ram));
+ std::memset(g_ram, 0, RAM_SIZE);
+ std::memset(g_scratchpad, 0, SCRATCHPAD_SIZE);
m_MEMCTRL.exp1_base = 0x1F000000;
m_MEMCTRL.exp2_base = 0x1F802000;
m_MEMCTRL.exp1_delay_size.bits = 0x0013243F;
@@ -137,8 +165,9 @@ bool DoState(StateWrapper& sw)
sw.Do(&m_bios_access_time);
sw.Do(&m_cdrom_access_time);
sw.Do(&m_spu_access_time);
- sw.DoBytes(g_ram, sizeof(g_ram));
- sw.DoBytes(g_bios, sizeof(g_bios));
+ sw.DoBytes(g_ram, RAM_SIZE);
+ sw.DoBytes(g_bios, BIOS_SIZE);
+ sw.DoBytes(g_scratchpad, SCRATCHPAD_SIZE);
sw.DoArray(m_MEMCTRL.regs, countof(m_MEMCTRL.regs));
sw.Do(&m_ram_size_reg);
sw.Do(&m_tty_line_buffer);
@@ -217,6 +246,209 @@ void RecalculateMemoryTimings()
m_spu_access_time[2] + 1);
}
+bool AllocateMemory()
+{
+ if (!m_memory_arena.Create(MEMORY_ARENA_SIZE, true, false))
+ {
+ Log_ErrorPrint("Failed to create memory arena");
+ return false;
+ }
+
+ // Create the base views.
+ g_ram = static_cast<u8*>(m_memory_arena.CreateViewPtr(MEMORY_ARENA_RAM_OFFSET, RAM_SIZE, true, false));
+ g_bios = static_cast<u8*>(m_memory_arena.CreateViewPtr(MEMORY_ARENA_BIOS_OFFSET, BIOS_SIZE, true, false));
+ g_scratchpad = static_cast<u8*>(
+ m_memory_arena.CreateViewPtr(MEMORY_ARENA_SCRATCHPAD_OFFSET, FASTMEM_SCRATCHPAD_SIZE, true, false));
+ if (!g_ram || !g_bios || !g_scratchpad)
+ {
+ Log_ErrorPrint("Failed to create base views of memory");
+ return false;
+ }
+
+ return true;
+}
+
+void UnmapFastmemViews()
+{
+ m_fastmem_ram_views.clear();
+ m_fastmem_scratchpad_views.clear();
+ m_fastmem_bios_views.clear();
+}
+
+void UpdateFastmemViews(bool enabled, bool isolate_cache)
+{
+ UnmapFastmemViews();
+ if (!enabled)
+ {
+ m_fastmem_base = nullptr;
+ return;
+ }
+
+ Log_DevPrintf("Remapping fastmem area, isolate cache = %s", isolate_cache ? "true " : "false");
+ if (!m_fastmem_base)
+ {
+ m_fastmem_base = static_cast(m_memory_arena.FindBaseAddressForMapping(FASTMEM_REGION_SIZE));
+ if (!m_fastmem_base)
+ {
+ Log_ErrorPrint("Failed to find base address for fastmem");
+ return;
+ }
+
+ Log_InfoPrintf("Fastmem base: %p", m_fastmem_base);
+ CPU::g_state.fastmem_base = m_fastmem_base;
+ }
+
+ auto MapRAM = [](u32 base_address) {
+ u8* map_address = m_fastmem_base + base_address;
+ auto view = m_memory_arena.CreateView(MEMORY_ARENA_RAM_OFFSET, RAM_SIZE, true, false, map_address);
+ if (!view)
+ {
+ Log_ErrorPrintf("Failed to map RAM at fastmem area %p (offset 0x%08X)", map_address, RAM_SIZE);
+ return;
+ }
+
+ // mark all pages with code as non-writable
+ for (u32 i = 0; i < CPU_CODE_CACHE_PAGE_COUNT; i++)
+ {
+ if (m_ram_code_bits[i])
+ {
+ u8* page_address = map_address + (i * CPU_CODE_CACHE_PAGE_SIZE);
+ if (!m_memory_arena.SetPageProtection(page_address, CPU_CODE_CACHE_PAGE_SIZE, true, false, false))
+ Log_ErrorPrintf("Failed to write-protect code page at %p");
+ }
+ }
+
+ m_fastmem_ram_views.push_back(std::move(view.value()));
+ };
+ auto MapScratchpad = [](u32 base_address) {
+ u8* map_address = m_fastmem_base + base_address;
+ auto view =
+ m_memory_arena.CreateView(MEMORY_ARENA_SCRATCHPAD_OFFSET, FASTMEM_SCRATCHPAD_SIZE, true, false, map_address);
+ if (!view)
+ {
+ Log_ErrorPrintf("Failed to map scratchpad at fastmem area %p (offset 0x%08X)", map_address,
+ FASTMEM_SCRATCHPAD_SIZE);
+ return;
+ }
+
+ // mark all pages beyond the first as inaccessible
+ // we need to do this because Windows maps views at 64K granularity, so the view is larger than the scratchpad
+ if (!m_memory_arena.SetPageProtection(map_address + CPU_CODE_CACHE_PAGE_SIZE,
+ FASTMEM_SCRATCHPAD_SIZE - CPU_CODE_CACHE_PAGE_SIZE, false, false, false))
+ {
+ Log_ErrorPrintf("Failed to read/write protect scratchpad");
+ }
+
+ m_fastmem_scratchpad_views.push_back(std::move(view.value()));
+ };
+ auto MapBIOS = [](u32 base_address) {
+ u8* map_address = m_fastmem_base + base_address;
+ auto view = m_memory_arena.CreateView(MEMORY_ARENA_BIOS_OFFSET, BIOS_SIZE, false, false, map_address);
+ if (!view)
+ {
+ Log_ErrorPrintf("Failed to map BIOS at fastmem area %p (offset 0x%08X)", map_address, RAM_SIZE);
+ return;
+ }
+
+ m_fastmem_bios_views.push_back(std::move(view.value()));
+ };
+
+ if (!isolate_cache)
+ {
+ // KUSEG - cached
+ MapRAM(0x00000000);
+ // MapScratchpad(0x1F800000);
+ // MapBIOS(0x1FC00000);
+
+ // KSEG0 - cached
+ MapRAM(0x80000000);
+ // MapScratchpad(0x9F800000);
+ // MapBIOS(0x9FC00000);
+ }
+
+ // KSEG1 - uncached
+ MapRAM(0xA0000000);
+ // MapBIOS(0xBFC00000);
+}
+
+bool IsRAMCodePage(u32 index)
+{
+ return m_ram_code_bits[index];
+}
+
+void SetRAMCodePage(u32 index)
+{
+ if (m_ram_code_bits[index])
+ return;
+
+ // protect fastmem pages
+ m_ram_code_bits[index] = true;
+ SetCodePageFastmemProtection(index, false);
+}
+
+void ClearRAMCodePage(u32 index)
+{
+ if (!m_ram_code_bits[index])
+ return;
+
+ // unprotect fastmem pages
+ m_ram_code_bits[index] = false;
+ SetCodePageFastmemProtection(index, true);
+}
+
+void SetCodePageFastmemProtection(u32 page_index, bool writable)
+{
+ // unprotect fastmem pages
+ for (const auto& view : m_fastmem_ram_views)
+ {
+ u8* page_address = static_cast(view.GetBasePointer()) + (page_index * CPU_CODE_CACHE_PAGE_SIZE);
+ if (!m_memory_arena.SetPageProtection(page_address, CPU_CODE_CACHE_PAGE_SIZE, true, writable, false))
+ {
+ Log_ErrorPrintf("Failed to %s code page %u (0x%08X) @ %p", writable ? "unprotect" : "protect", page_index,
+ page_index * CPU_CODE_CACHE_PAGE_SIZE, page_address);
+ }
+ }
+}
+
+void ClearRAMCodePageFlags()
+{
+ m_ram_code_bits.reset();
+
+ // unprotect fastmem pages
+ for (const auto& view : m_fastmem_ram_views)
+ {
+ if (!m_memory_arena.SetPageProtection(view.GetBasePointer(), view.GetMappingSize(), true, true, false))
+ {
+ Log_ErrorPrintf("Failed to unprotect code pages for fastmem view @ %p", view.GetBasePointer());
+ }
+ }
+}
+
+bool IsCodePageAddress(PhysicalMemoryAddress address)
+{
+ return IsRAMAddress(address) ? m_ram_code_bits[(address & RAM_MASK) / CPU_CODE_CACHE_PAGE_SIZE] : false;
+}
+
+bool HasCodePagesInRange(PhysicalMemoryAddress start_address, u32 size)
+{
+ if (!IsRAMAddress(start_address))
+ return false;
+
+ start_address = (start_address & RAM_MASK);
+
+ const u32 end_address = start_address + size;
+ while (start_address < end_address)
+ {
+ const u32 code_page_index = start_address / CPU_CODE_CACHE_PAGE_SIZE;
+ if (m_ram_code_bits[code_page_index])
+ return true;
+
+ start_address += CPU_CODE_CACHE_PAGE_SIZE;
+ }
+
+ return false;
+}
+
static TickCount DoInvalidAccess(MemoryAccessType type, MemoryAccessSize size, PhysicalMemoryAddress address,
u32& value)
{
@@ -288,7 +520,7 @@ ALWAYS_INLINE static TickCount DoRAMAccess(u32 offset, u32& value)
}
}
- return (type == MemoryAccessType::Read) ? 4 : 0;
+ return (type == MemoryAccessType::Read) ? RAM_READ_TICKS : 0;
}
template<MemoryAccessType type, MemoryAccessSize size>
@@ -753,7 +985,7 @@ ALWAYS_INLINE_RELEASE void DoInstructionRead(PhysicalMemoryAddress address, void
{
std::memcpy(data, &g_ram[address & RAM_MASK], sizeof(u32) * word_count);
if constexpr (add_ticks)
- g_state.pending_ticks += (icache_read ? 1 : 4) * word_count;
+ g_state.pending_ticks += (icache_read ? 1 : RAM_READ_TICKS) * word_count;
}
else if (address >= BIOS_BASE && address < (BIOS_BASE + BIOS_SIZE))
{
@@ -776,7 +1008,7 @@ TickCount GetInstructionReadTicks(VirtualMemoryAddress address)
if (address < RAM_MIRROR_END)
{
- return 4;
+ return RAM_READ_TICKS;
}
else if (address >= BIOS_BASE && address < (BIOS_BASE + BIOS_SIZE))
{
@@ -894,34 +1126,36 @@ static void WriteCacheControl(u32 value)
template<MemoryAccessType type, MemoryAccessSize size>
ALWAYS_INLINE static TickCount DoScratchpadAccess(PhysicalMemoryAddress address, u32& value)
{
+ using namespace Bus;
+
const PhysicalMemoryAddress cache_offset = address & DCACHE_OFFSET_MASK;
if constexpr (size == MemoryAccessSize::Byte)
{
if constexpr (type == MemoryAccessType::Read)
- value = ZeroExtend32(g_state.dcache[cache_offset]);
+ value = ZeroExtend32(g_scratchpad[cache_offset]);
else
- g_state.dcache[cache_offset] = Truncate8(value);
+ g_scratchpad[cache_offset] = Truncate8(value);
}
else if constexpr (size == MemoryAccessSize::HalfWord)
{
if constexpr (type == MemoryAccessType::Read)
{
u16 temp;
- std::memcpy(&temp, &g_state.dcache[cache_offset], sizeof(temp));
+ std::memcpy(&temp, &g_scratchpad[cache_offset], sizeof(temp));
value = ZeroExtend32(temp);
}
else
{
u16 temp = Truncate16(value);
- std::memcpy(&g_state.dcache[cache_offset], &temp, sizeof(temp));
+ std::memcpy(&g_scratchpad[cache_offset], &temp, sizeof(temp));
}
}
else if constexpr (size == MemoryAccessSize::Word)
{
if constexpr (type == MemoryAccessType::Read)
- std::memcpy(&value, &g_state.dcache[cache_offset], sizeof(value));
+ std::memcpy(&value, &g_scratchpad[cache_offset], sizeof(value));
else
- std::memcpy(&g_state.dcache[cache_offset], &value, sizeof(value));
+ std::memcpy(&g_scratchpad[cache_offset], &value, sizeof(value));
}
return 0;
@@ -1307,6 +1541,64 @@ bool SafeWriteMemoryWord(VirtualMemoryAddress addr, u32 value)
return DoMemoryAccess<MemoryAccessType::Write, MemoryAccessSize::Word>(addr, value) >= 0;
}
+void* GetDirectReadMemoryPointer(VirtualMemoryAddress address, MemoryAccessSize size, TickCount* read_ticks)
+{
+ using namespace Bus;
+
+ const u32 seg = (address >> 29);
+ if (seg != 0 && seg != 4 && seg != 5)
+ return nullptr;
+
+ const PhysicalMemoryAddress paddr = address & PHYSICAL_MEMORY_ADDRESS_MASK;
+ if (paddr < RAM_MIRROR_END)
+ {
+ if (read_ticks)
+ *read_ticks = RAM_READ_TICKS;
+
+ return &g_ram[paddr & RAM_MASK];
+ }
+
+ if ((paddr & DCACHE_LOCATION_MASK) == DCACHE_LOCATION)
+ {
+ if (read_ticks)
+ *read_ticks = 0;
+
+ return &g_scratchpad[paddr & DCACHE_OFFSET_MASK];
+ }
+
+ if (paddr >= BIOS_BASE && paddr < (BIOS_BASE + BIOS_SIZE))
+ {
+ if (read_ticks)
+ *read_ticks = m_bios_access_time[static_cast<u32>(size)];
+
+ return &g_bios[paddr & BIOS_MASK];
+ }
+
+ return nullptr;
+}
+
+void* GetDirectWriteMemoryPointer(VirtualMemoryAddress address, MemoryAccessSize size)
+{
+ using namespace Bus;
+
+ const u32 seg = (address >> 29);
+ if (seg != 0 && seg != 4 && seg != 5)
+ return nullptr;
+
+ const PhysicalMemoryAddress paddr = address & PHYSICAL_MEMORY_ADDRESS_MASK;
+
+#if 0
+ // Not enabled until we can protect code regions.
+ if (paddr < RAM_MIRROR_END)
+ return &g_ram[paddr & RAM_MASK];
+#endif
+
+ if ((paddr & DCACHE_LOCATION_MASK) == DCACHE_LOCATION)
+ return &g_scratchpad[paddr & DCACHE_OFFSET_MASK];
+
+ return nullptr;
+}
+
namespace Recompiler::Thunks {
u64 ReadMemoryByte(u32 address)
diff --git a/src/core/bus.h b/src/core/bus.h
index 10c44f90e..921a68931 100644
--- a/src/core/bus.h
+++ b/src/core/bus.h
@@ -1,6 +1,6 @@
#pragma once
#include "common/bitfield.h"
-#include "cpu_code_cache.h"
+#include "common/memory_arena.h"
#include "types.h"
#include <bitset>
#include <vector>
@@ -20,6 +20,9 @@ enum : u32
EXP1_BASE = 0x1F000000,
EXP1_SIZE = 0x800000,
EXP1_MASK = EXP1_SIZE - 1,
+ SCRATCHPAD_BASE = 0x1F800000,
+ SCRATCHPAD_SIZE = 0x400,
+ SCRATCHPAD_MASK = SCRATCHPAD_SIZE - 1,
MEMCTRL_BASE = 0x1F801000,
MEMCTRL_SIZE = 0x40,
MEMCTRL_MASK = MEMCTRL_SIZE - 1,
@@ -66,26 +69,72 @@ enum : u32
MEMCTRL_REG_COUNT = 9
};
-void Initialize();
+enum : TickCount
+{
+ RAM_READ_TICKS = 4
+};
+
+enum : size_t
+{
+ FASTMEM_SCRATCHPAD_SIZE = 0x10000,
+
+ // Our memory arena contains storage for RAM, scratchpad, and BIOS.
+ MEMORY_ARENA_SIZE = RAM_SIZE + FASTMEM_SCRATCHPAD_SIZE + BIOS_SIZE,
+
+ // Offsets within the memory arena.
+ MEMORY_ARENA_RAM_OFFSET = 0,
+ MEMORY_ARENA_SCRATCHPAD_OFFSET = MEMORY_ARENA_RAM_OFFSET + RAM_SIZE,
+ MEMORY_ARENA_BIOS_OFFSET = MEMORY_ARENA_SCRATCHPAD_OFFSET + FASTMEM_SCRATCHPAD_SIZE,
+
+ // Fastmem region size is 4GB to cover the entire 32-bit address space.
+ FASTMEM_REGION_SIZE = UINT64_C(0x100000000)
+};
+
+bool Initialize();
void Shutdown();
void Reset();
bool DoState(StateWrapper& sw);
+u8* GetFastmemBase();
+void UpdateFastmemViews(bool enabled, bool isolate_cache);
+
void SetExpansionROM(std::vector<u8> data);
void SetBIOS(const std::vector<u8>& image);
extern std::bitset<CPU_CODE_CACHE_PAGE_COUNT> m_ram_code_bits;
-extern u8 g_ram[RAM_SIZE]; // 2MB RAM
-extern u8 g_bios[BIOS_SIZE]; // 512K BIOS ROM
+extern u8* g_ram; // 2MB RAM
+extern u8* g_bios; // 512K BIOS ROM
+extern u8* g_scratchpad; // 1KB scratchpad, mapped as a 64K fastmem view
+
+/// Returns true if the address specified is writable (RAM).
+ALWAYS_INLINE static bool IsRAMAddress(PhysicalMemoryAddress address)
+{
+ return address < RAM_MIRROR_END;
+}
+
+/// Returns the code page index for a RAM address.
+ALWAYS_INLINE static u32 GetRAMCodePageIndex(PhysicalMemoryAddress address)
+{
+ return (address & RAM_MASK) / CPU_CODE_CACHE_PAGE_SIZE;
+}
+
+/// Returns true if the specified page contains code.
+bool IsRAMCodePage(u32 index);
/// Flags a RAM region as code, so we know when to invalidate blocks.
-ALWAYS_INLINE void SetRAMCodePage(u32 index) { m_ram_code_bits[index] = true; }
+void SetRAMCodePage(u32 index);
/// Unflags a RAM region as code, the code cache will no longer be notified when writes occur.
-ALWAYS_INLINE void ClearRAMCodePage(u32 index) { m_ram_code_bits[index] = false; }
+void ClearRAMCodePage(u32 index);
/// Clears all code bits for RAM regions.
-ALWAYS_INLINE void ClearRAMCodePageFlags() { m_ram_code_bits.reset(); }
+void ClearRAMCodePageFlags();
+
+/// Returns true if the specified address is in a code page.
+bool IsCodePageAddress(PhysicalMemoryAddress address);
+
+/// Returns true if the range specified overlaps with a code page.
+bool HasCodePagesInRange(PhysicalMemoryAddress start_address, u32 size);
/// Returns the number of cycles stolen by DMA RAM access.
ALWAYS_INLINE TickCount GetDMARAMTickCount(u32 word_count)
@@ -97,16 +146,4 @@ ALWAYS_INLINE TickCount GetDMARAMTickCount(u32 word_count)
return static_cast(word_count + ((word_count + 15) / 16));
}
-/// Invalidates any code pages which overlap the specified range.
-ALWAYS_INLINE void InvalidateCodePages(PhysicalMemoryAddress address, u32 word_count)
-{
- const u32 start_page = address / CPU_CODE_CACHE_PAGE_SIZE;
- const u32 end_page = (address + word_count * sizeof(u32)) / CPU_CODE_CACHE_PAGE_SIZE;
- for (u32 page = start_page; page <= end_page; page++)
- {
- if (m_ram_code_bits[page])
- CPU::CodeCache::InvalidateBlocksWithPageIndex(page);
- }
-}
-
-} // namespace Bus
+} // namespace Bus
\ No newline at end of file
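
To make the layout above concrete: FASTMEM_REGION_SIZE spans the full 32-bit guest address space, and UpdateFastmemViews() mirrors RAM at each guest base (0x00000000, 0x80000000, 0xA0000000), so a recompiled guest load reduces to a single base-plus-address access. A hedged sketch of the arithmetic (the helper is illustrative; the real code emits this inline):

```cpp
#include <cstdint>
#include <cstring>

// Any mapped mirror of guest_address resolves to the same RAM pages; an
// unmapped or write-protected address faults into the page fault handler.
static inline uint32_t FastmemReadWord(uint8_t* fastmem_base, uint32_t guest_address)
{
  uint32_t value;
  std::memcpy(&value, fastmem_base + guest_address, sizeof(value));
  return value;
}
```
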
diff --git a/src/core/cpu_code_cache.cpp b/src/core/cpu_code_cache.cpp
index 134ec92ac..3e1cd50f2 100644
--- a/src/core/cpu_code_cache.cpp
+++ b/src/core/cpu_code_cache.cpp
@@ -5,6 +5,7 @@
#include "cpu_core.h"
#include "cpu_core_private.h"
#include "cpu_disasm.h"
+#include "settings.h"
#include "system.h"
#include "timing_event.h"
Log_SetChannel(CPU::CodeCache);
@@ -35,14 +36,8 @@ alignas(Recompiler::CODE_STORAGE_ALIGNMENT) static u8
static JitCodeBuffer s_code_buffer;
-enum : u32
-{
- FAST_MAP_RAM_SLOT_COUNT = Bus::RAM_SIZE / 4,
- FAST_MAP_BIOS_SLOT_COUNT = Bus::BIOS_SIZE / 4,
- FAST_MAP_TOTAL_SLOT_COUNT = FAST_MAP_RAM_SLOT_COUNT + FAST_MAP_BIOS_SLOT_COUNT,
-};
-
std::array<CodeBlock::HostCodePointer, FAST_MAP_TOTAL_SLOT_COUNT> s_fast_map;
+CodeBlock::HostCodePointer s_asm_dispatcher;
ALWAYS_INLINE static u32 GetFastMapIndex(u32 pc)
{
@@ -51,6 +46,7 @@ ALWAYS_INLINE static u32 GetFastMapIndex(u32 pc)
((pc & Bus::RAM_MASK) >> 2);
}
+static void CompileDispatcher();
static void FastCompileBlockFunction();
static void ResetFastMap()
@@ -66,6 +62,7 @@ static void SetFastMap(u32 pc, CodeBlock::HostCodePointer function)
#endif
using BlockMap = std::unordered_map<u32, CodeBlock*>;
+using HostCodeMap = std::map<CodeBlock::HostCodePointer, CodeBlock*>;
void LogCurrentState();
@@ -90,35 +87,49 @@ static void LinkBlock(CodeBlock* from, CodeBlock* to);
/// Unlink all blocks which point to this block, and any that this block links to.
static void UnlinkBlock(CodeBlock* block);
-static bool s_use_recompiler = false;
static BlockMap s_blocks;
static std::array<std::vector<CodeBlock*>, CPU_CODE_CACHE_PAGE_COUNT> m_ram_block_map;
-void Initialize(bool use_recompiler)
+#ifdef WITH_RECOMPILER
+static HostCodeMap s_host_code_map;
+
+static void AddBlockToHostCodeMap(CodeBlock* block);
+static void RemoveBlockFromHostCodeMap(CodeBlock* block);
+static bool InitializeFastmem();
+static void ShutdownFastmem();
+static Common::PageFaultHandler::HandlerResult PageFaultHandler(void* exception_pc, void* fault_address, bool is_write);
+#endif
+
+void Initialize()
{
Assert(s_blocks.empty());
#ifdef WITH_RECOMPILER
- s_use_recompiler = use_recompiler;
-#ifdef USE_STATIC_CODE_BUFFER
- if (!s_code_buffer.Initialize(s_code_storage, sizeof(s_code_storage), RECOMPILER_FAR_CODE_CACHE_SIZE,
- RECOMPILER_GUARD_SIZE))
-#else
- if (!s_code_buffer.Allocate(RECOMPILER_CODE_CACHE_SIZE, RECOMPILER_FAR_CODE_CACHE_SIZE))
-#endif
+ if (g_settings.IsUsingRecompiler())
{
- Panic("Failed to initialize code space");
- }
-
- ResetFastMap();
+#ifdef USE_STATIC_CODE_BUFFER
+ if (!s_code_buffer.Initialize(s_code_storage, sizeof(s_code_storage), RECOMPILER_FAR_CODE_CACHE_SIZE,
+ RECOMPILER_GUARD_SIZE))
#else
- s_use_recompiler = false;
+ if (!s_code_buffer.Allocate(RECOMPILER_CODE_CACHE_SIZE, RECOMPILER_FAR_CODE_CACHE_SIZE))
+#endif
+ {
+ Panic("Failed to initialize code space");
+ }
+
+ if (g_settings.IsUsingFastmem() && !InitializeFastmem())
+ Panic("Failed to initialize fastmem");
+
+ ResetFastMap();
+ CompileDispatcher();
+ }
#endif
}
void Shutdown()
{
Flush();
+ ShutdownFastmem();
#ifdef WITH_RECOMPILER
s_code_buffer.Destroy();
#endif
@@ -131,18 +142,17 @@ static void ExecuteImpl()
while (!g_state.frame_done)
{
+ if (HasPendingInterrupt())
+ {
+ SafeReadInstruction(g_state.regs.pc, &g_state.next_instruction.bits);
+ DispatchInterrupt();
+ }
+
TimingEvents::UpdateCPUDowncount();
next_block_key = GetNextBlockKey();
while (g_state.pending_ticks < g_state.downcount)
{
- if (HasPendingInterrupt())
- {
- SafeReadInstruction(g_state.regs.pc, &g_state.next_instruction.bits);
- DispatchInterrupt();
- next_block_key = GetNextBlockKey();
- }
-
CodeBlock* block = LookupBlock(next_block_key);
if (!block)
{
@@ -152,6 +162,7 @@ static void ExecuteImpl()
}
reexecute_block:
+ Assert(!(HasPendingInterrupt()));
#if 0
const u32 tick = TimingEvents::GetGlobalTickCounter() + CPU::GetPendingTicks();
@@ -170,7 +181,7 @@ static void ExecuteImpl()
if (g_state.pending_ticks >= g_state.downcount)
break;
- else if (HasPendingInterrupt() || !USE_BLOCK_LINKING)
+ else if (!USE_BLOCK_LINKING)
continue;
next_block_key = GetNextBlockKey();
@@ -237,20 +248,32 @@ void Execute()
#ifdef WITH_RECOMPILER
+void CompileDispatcher()
+{
+ Recompiler::CodeGenerator cg(&s_code_buffer);
+ s_asm_dispatcher = cg.CompileDispatcher();
+}
+
+CodeBlock::HostCodePointer* GetFastMapPointer()
+{
+ return s_fast_map.data();
+}
+
void ExecuteRecompiler()
{
+#if 0
while (!g_state.frame_done)
{
+ if (HasPendingInterrupt())
+ {
+ SafeReadInstruction(g_state.regs.pc, &g_state.next_instruction.bits);
+ DispatchInterrupt();
+ }
+
TimingEvents::UpdateCPUDowncount();
while (g_state.pending_ticks < g_state.downcount)
{
- if (HasPendingInterrupt())
- {
- SafeReadInstruction(g_state.regs.pc, &g_state.next_instruction.bits);
- DispatchInterrupt();
- }
-
const u32 pc = g_state.regs.pc;
g_state.current_instruction_pc = pc;
const u32 fast_map_index = GetFastMapIndex(pc);
@@ -259,6 +282,9 @@ void ExecuteRecompiler()
TimingEvents::RunEvents();
}
+#else
+ s_asm_dispatcher();
+#endif
// in case we switch to interpreter...
g_state.regs.npc = g_state.regs.pc;
@@ -266,14 +292,33 @@ void ExecuteRecompiler()
#endif
-void SetUseRecompiler(bool enable)
+void Reinitialize()
{
-#ifdef WITH_RECOMPILER
- if (s_use_recompiler == enable)
- return;
-
- s_use_recompiler = enable;
Flush();
+#ifdef WITH_RECOMPILER
+
+ ShutdownFastmem();
+ s_code_buffer.Destroy();
+
+ if (g_settings.IsUsingRecompiler())
+ {
+
+#ifdef USE_STATIC_CODE_BUFFER
+ if (!s_code_buffer.Initialize(s_code_storage, sizeof(s_code_storage), RECOMPILER_FAR_CODE_CACHE_SIZE,
+ RECOMPILER_GUARD_SIZE))
+#else
+ if (!s_code_buffer.Allocate(RECOMPILER_CODE_CACHE_SIZE, RECOMPILER_FAR_CODE_CACHE_SIZE))
+#endif
+ {
+ Panic("Failed to initialize code space");
+ }
+
+ if (g_settings.IsUsingFastmem() && !InitializeFastmem())
+ Panic("Failed to initialize fastmem");
+
+ ResetFastMap();
+ CompileDispatcher();
+ }
#endif
}
@@ -285,10 +330,13 @@ void Flush()
for (const auto& it : s_blocks)
delete it.second;
+
s_blocks.clear();
#ifdef WITH_RECOMPILER
+ s_host_code_map.clear();
s_code_buffer.Reset();
ResetFastMap();
+ CompileDispatcher();
#endif
}
@@ -344,6 +392,8 @@ CodeBlock* LookupBlock(CodeBlockKey key)
}
iter = s_blocks.emplace(key.bits, block).first;
+ AddBlockToHostCodeMap(block);
+
return block;
}
@@ -370,6 +420,8 @@ bool RevalidateBlock(CodeBlock* block)
return true;
recompile:
+ RemoveBlockFromHostCodeMap(block);
+
block->instructions.clear();
if (!CompileBlock(block))
{
@@ -379,6 +431,7 @@ recompile:
}
// re-add to page map again
+ AddBlockToHostCodeMap(block);
if (block->IsInRAM())
AddBlockToPageMap(block);
@@ -425,6 +478,9 @@ bool CompileBlock(CodeBlock* block)
block->uncached_fetch_ticks += GetInstructionReadTicks(pc);
}
+ block->contains_loadstore_instructions |= cbi.is_load_instruction;
+ block->contains_loadstore_instructions |= cbi.is_store_instruction;
+
// instruction is decoded now
block->instructions.push_back(cbi);
pc += sizeof(cbi.instruction.bits);
@@ -467,7 +523,7 @@ bool CompileBlock(CodeBlock* block)
}
#ifdef WITH_RECOMPILER
- if (s_use_recompiler)
+ if (g_settings.IsUsingRecompiler())
{
// Ensure we're not going to run out of space while compiling this block.
if (s_code_buffer.GetFreeCodeSpace() <
@@ -538,6 +594,9 @@ void FlushBlock(CodeBlock* block)
RemoveBlockFromPageMap(block);
UnlinkBlock(block);
+#ifdef WITH_RECOMPILER
+ RemoveBlockFromHostCodeMap(block);
+#endif
s_blocks.erase(iter);
delete block;
@@ -599,4 +658,107 @@ void UnlinkBlock(CodeBlock* block)
block->link_successors.clear();
}
+#ifdef WITH_RECOMPILER
+
+void AddBlockToHostCodeMap(CodeBlock* block)
+{
+ if (!g_settings.IsUsingRecompiler())
+ return;
+
+ auto ir = s_host_code_map.emplace(block->host_code, block);
+ Assert(ir.second);
+}
+
+void RemoveBlockFromHostCodeMap(CodeBlock* block)
+{
+ if (!g_settings.IsUsingRecompiler())
+ return;
+
+ HostCodeMap::iterator hc_iter = s_host_code_map.find(block->host_code);
+ Assert(hc_iter != s_host_code_map.end());
+ s_host_code_map.erase(hc_iter);
+}
+
+bool InitializeFastmem()
+{
+ if (!Common::PageFaultHandler::InstallHandler(&s_host_code_map, PageFaultHandler))
+ {
+ Log_ErrorPrintf("Failed to install page fault handler");
+ return false;
+ }
+
+ Bus::UpdateFastmemViews(true, g_state.cop0_regs.sr.Isc);
+ return true;
+}
+
+void ShutdownFastmem()
+{
+ Common::PageFaultHandler::RemoveHandler(&s_host_code_map);
+ Bus::UpdateFastmemViews(false, false);
+}
+
+Common::PageFaultHandler::HandlerResult PageFaultHandler(void* exception_pc, void* fault_address, bool is_write)
+{
+ if (static_cast<u8*>(fault_address) < g_state.fastmem_base ||
+ (static_cast<u8*>(fault_address) - g_state.fastmem_base) >= Bus::FASTMEM_REGION_SIZE)
+ {
+ return Common::PageFaultHandler::HandlerResult::ExecuteNextHandler;
+ }
+
+ const PhysicalMemoryAddress fastmem_address =
+ static_cast<PhysicalMemoryAddress>(static_cast<ptrdiff_t>(static_cast<u8*>(fault_address) - g_state.fastmem_base));
+
+ Log_DevPrintf("Page fault handler invoked at PC=%p Address=%p %s, fastmem offset 0x%08X", exception_pc, fault_address,
+ is_write ? "(write)" : "(read)", fastmem_address);
+
+ if (is_write && !g_state.cop0_regs.sr.Isc && Bus::IsRAMAddress(fastmem_address))
+ {
+ // RAM is always mapped in fastmem, so a write fault here is almost certainly a write-protected code page.
+ const u32 code_page_index = Bus::GetRAMCodePageIndex(fastmem_address);
+ if (Bus::IsRAMCodePage(code_page_index))
+ {
+ InvalidateBlocksWithPageIndex(code_page_index);
+ return Common::PageFaultHandler::HandlerResult::ContinueExecution;
+ }
+ }
+
+ // use upper_bound to find the next block after the pc
+ HostCodeMap::iterator upper_iter =
+ s_host_code_map.upper_bound(reinterpret_cast<CodeBlock::HostCodePointer>(exception_pc));
+ if (upper_iter == s_host_code_map.begin())
+ return Common::PageFaultHandler::HandlerResult::ExecuteNextHandler;
+
+ // then decrement it by one to (hopefully) get the block we want
+ upper_iter--;
+
+ // find the loadstore info in the code block
+ CodeBlock* block = upper_iter->second;
+ for (auto bpi_iter = block->loadstore_backpatch_info.begin(); bpi_iter != block->loadstore_backpatch_info.end();
+ ++bpi_iter)
+ {
+ const Recompiler::LoadStoreBackpatchInfo& lbi = *bpi_iter;
+ if (lbi.host_pc == exception_pc)
+ {
+ // found it, do fixup
+ if (Recompiler::CodeGenerator::BackpatchLoadStore(lbi))
+ {
+ // remove the backpatch entry since we won't be coming back to this one
+ block->loadstore_backpatch_info.erase(bpi_iter);
+ return Common::PageFaultHandler::HandlerResult::ContinueExecution;
+ }
+ else
+ {
+ Log_ErrorPrintf("Failed to backpatch %p in block 0x%08X", exception_pc, block->GetPC());
+ return Common::PageFaultHandler::HandlerResult::ExecuteNextHandler;
+ }
+ }
+ }
+
+ // we didn't find the PC in our list.
+ Log_ErrorPrintf("Loadstore PC not found for %p in block 0x%08X", exception_pc, block->GetPC());
+ return Common::PageFaultHandler::HandlerResult::ExecuteNextHandler;
+}
+
+#endif
+
} // namespace CPU::CodeCache
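
The upper_bound() trick in PageFaultHandler() above is a general "which block owns this pointer" query over an ordered map keyed by block start address; a self-contained sketch:

```cpp
#include <map>

struct Block;
using HostCodeMap = std::map<const void*, Block*>; // keyed by block start PC

static Block* FindBlockForPC(const HostCodeMap& map, const void* pc)
{
  // First block starting strictly after pc...
  auto iter = map.upper_bound(pc);
  if (iter == map.begin())
    return nullptr; // pc precedes every known block
  // ...so the previous entry is the only candidate that can contain pc;
  // the caller still validates pc against that block's real extent.
  --iter;
  return iter->second;
}
```
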
diff --git a/src/core/cpu_code_cache.h b/src/core/cpu_code_cache.h
index 068e6706e..92a8b0d0b 100644
--- a/src/core/cpu_code_cache.h
+++ b/src/core/cpu_code_cache.h
@@ -1,14 +1,28 @@
#pragma once
+#include "bus.h"
#include "common/bitfield.h"
#include "common/jit_code_buffer.h"
+#include "common/page_fault_handler.h"
#include "cpu_types.h"
#include <array>
+#include <map>