// SPDX-FileCopyrightText: 2002-2024 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+

#include "common/Assertions.h"
#include "common/BitUtils.h"
#include "common/Console.h"
#include "common/CrashHandler.h"
#include "common/Error.h"
#include "common/HostSys.h"

#include <algorithm>
#include <cerrno>
#include <csignal>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <mutex>

#include <fcntl.h>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>

#include "fmt/core.h"

#if defined(__FreeBSD__)
#include "cpuinfo.h"
#endif

// FreeBSD does not have MAP_FIXED_NOREPLACE, but does have MAP_EXCL.
// MAP_FIXED combined with MAP_EXCL behaves like MAP_FIXED_NOREPLACE.
#if defined(__FreeBSD__) && !defined(MAP_FIXED_NOREPLACE)
#define MAP_FIXED_NOREPLACE (MAP_FIXED | MAP_EXCL)
#endif

static __ri uint LinuxProt(const PageProtectionMode& mode)
{
	u32 lnxmode = 0;

	if (mode.CanWrite())
		lnxmode |= PROT_WRITE;
	if (mode.CanRead())
		lnxmode |= PROT_READ;
	if (mode.CanExecute())
		lnxmode |= PROT_EXEC | PROT_READ;

	return lnxmode;
}

void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");

	if (mode.IsNone())
		return nullptr;

	const u32 prot = LinuxProt(mode);

	u32 flags = MAP_PRIVATE | MAP_ANONYMOUS;
	if (base)
		flags |= MAP_FIXED_NOREPLACE;

	void* res = mmap(base, size, prot, flags, -1, 0);
	if (res == MAP_FAILED)
		return nullptr;

	return res;
}

void HostSys::Munmap(void* base, size_t size)
{
	if (!base)
		return;

	munmap(base, size);
}

void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");

	const u32 lnxmode = LinuxProt(mode);

	const int result = mprotect(baseaddr, size, lnxmode);
	if (result != 0)
		pxFail("mprotect() failed");
}

std::string HostSys::GetFileMappingName(const char* prefix)
{
	const unsigned pid = static_cast<unsigned>(getpid());
#if defined(__FreeBSD__)
	// FreeBSD's shm_open(3) requires the name to be absolute
	return fmt::format("/tmp/{}_{}", prefix, pid);
#else
	return fmt::format("{}_{}", prefix, pid);
#endif
}

void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
	const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
	if (fd < 0)
	{
		std::fprintf(stderr, "shm_open failed: %d\n", errno);
		return nullptr;
	}

	// we're not going to be opening this mapping in other processes, so remove the file
	shm_unlink(name);

	// ensure it's the correct size
	if (ftruncate(fd, static_cast<off_t>(size)) < 0)
	{
		std::fprintf(stderr, "ftruncate(%zu) failed: %d\n", size, errno);
		close(fd);
		return nullptr;
	}

	return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}

void HostSys::DestroySharedMemory(void* ptr)
{
	close(static_cast<int>(reinterpret_cast<intptr_t>(ptr)));
}

void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	const uint lnxmode = LinuxProt(mode);

	const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED_NOREPLACE) : MAP_SHARED;
	void* ptr = mmap(baseaddr, size, lnxmode, flags,
		static_cast<int>(reinterpret_cast<intptr_t>(handle)), static_cast<off_t>(offset));
	if (ptr == MAP_FAILED)
		return nullptr;

	return ptr;
}

void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
	if (munmap(baseaddr, size) != 0)
		pxFailRel("Failed to unmap shared memory");
}

size_t HostSys::GetRuntimePageSize()
{
	const long res = sysconf(_SC_PAGESIZE);
	return (res > 0) ? static_cast<size_t>(res) : 0;
}
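// Illustrative sketch (not compiled): one possible round trip through the
// shared-memory helpers above. CreateSharedMemory() returns the shm file
// descriptor smuggled through a void*, so the handle must be released with
// DestroySharedMemory(), never Munmap(). The chained Read()/Write() setters on
// PageProtectionMode are assumed from common/HostSys.h; adjust to however the
// mode is actually built in calling code.
#if 0
static void SharedMemoryRoundTripExample()
{
	const std::string name = HostSys::GetFileMappingName("example");
	const size_t size = __pagesize * 16;

	void* handle = HostSys::CreateSharedMemory(name.c_str(), size);
	if (!handle)
		return;

	// Map the whole object read/write at an OS-chosen address.
	void* view = HostSys::MapSharedMemory(handle, 0, nullptr, size,
		PageProtectionMode().Read().Write());
	if (view)
		HostSys::UnmapSharedMemory(view, size);

	// Close the underlying shm fd (the object itself was already shm_unlink()ed).
	HostSys::DestroySharedMemory(handle);
}
#endif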
size_t HostSys::GetRuntimeCacheLineSize()
{
#if defined(__FreeBSD__)
	if (!cpuinfo_initialize())
		return 0;

	u32 max_line_size = 0;
	for (u32 i = 0; i < cpuinfo_get_processors_count(); i++)
	{
		const u32 l1i = cpuinfo_get_processor(i)->cache.l1i->line_size;
		const u32 l1d = cpuinfo_get_processor(i)->cache.l1d->line_size;
		const u32 res = std::max(l1i, l1d);
		max_line_size = std::max(max_line_size, res);
	}

	return static_cast<size_t>(max_line_size);
#else
	const long l1i = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
	const long l1d = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
	long res = (l1i > l1d) ? l1i : l1d;

	// Fall back to sysfs, taking the largest coherency_line_size of any cache level.
	for (int index = 0; index < 16; index++)
	{
		char buf[128];
		snprintf(buf, sizeof(buf), "/sys/devices/system/cpu/cpu0/cache/index%d/coherency_line_size", index);
		std::FILE* fp = std::fopen(buf, "rb");
		if (!fp)
			break;

		const size_t nread = std::fread(buf, 1, sizeof(buf) - 1, fp);
		std::fclose(fp);
		buf[nread] = 0;

		const long val = std::atoi(buf);
		res = (val > res) ? val : res;
	}
	return (res > 0) ? static_cast<size_t>(res) : 0;
#endif
}

SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
	: m_base_ptr(base_ptr)
	, m_size(size)
	, m_num_pages(num_pages)
{
}

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
	pxAssertRel(m_num_mappings == 0, "No mappings left");

	if (munmap(m_base_ptr, m_size) != 0)
		pxFailRel("Failed to release shared memory area");
}

std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
{
	pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");

	// Reserve the address space with an inaccessible mapping; pieces of it get
	// replaced with real file mappings by Map() below.
	void* alloc = mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (alloc == MAP_FAILED)
		return nullptr;

	return std::unique_ptr<SharedMemoryMappingArea>(
		new SharedMemoryMappingArea(static_cast<u8*>(alloc), size, size / __pagesize));
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
	const PageProtectionMode& mode)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

	// MAP_FIXED is okay here, since we've reserved the entire region, and *want* to overwrite the mapping.
	const uint lnxmode = LinuxProt(mode);
	void* const ptr = mmap(map_base, map_size, lnxmode, MAP_SHARED | MAP_FIXED,
		static_cast<int>(reinterpret_cast<intptr_t>(file_handle)), static_cast<off_t>(file_offset));
	if (ptr == MAP_FAILED)
		return nullptr;

	m_num_mappings++;
	return static_cast<u8*>(ptr);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

	// Don't munmap() here; that would punch a hole in the reservation. Replace
	// the range with a fresh inaccessible mapping instead.
	if (mmap(map_base, map_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
		return false;

	m_num_mappings--;
	return true;
}
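// Illustrative sketch (not compiled): the reserve-then-map pattern implemented
// by SharedMemoryMappingArea above. The shm handle and sizes are hypothetical,
// the BasePointer() accessor and the chained PageProtectionMode setters are
// assumed from common/HostSys.h.
#if 0
static void MappingAreaExample(void* shm_handle)
{
	// Reserve 64 pages of contiguous address space up front...
	std::unique_ptr<SharedMemoryMappingArea> area = SharedMemoryMappingArea::Create(__pagesize * 64);
	if (!area)
		return;

	// ...then point the first 32 pages at offset 0 of the shared memory object.
	// Map() uses MAP_FIXED, which is safe precisely because the region was reserved.
	u8* view = area->Map(shm_handle, 0, area->BasePointer(), __pagesize * 32,
		PageProtectionMode().Read().Write());
	if (view)
		area->Unmap(view, __pagesize * 32);

	// The destructor asserts that every Map() was matched by an Unmap().
}
#endif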
namespace PageFaultHandler
{
static std::recursive_mutex s_exception_handler_mutex;
static bool s_in_exception_handler = false;
static bool s_installed = false;
} // namespace PageFaultHandler

#ifdef _M_ARM64

void HostSys::FlushInstructionCache(void* address, u32 size)
{
	__builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}

[[maybe_unused]] static bool IsStoreInstruction(const void* ptr)
{
	u32 bits;
	std::memcpy(&bits, ptr, sizeof(bits));

	// Based on vixl's disassembler Instruction::IsStore().
	// if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed)
	if ((bits & 0x0a000000) != 0x08000000)
		return false;

	// if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed)
	if ((bits & 0x3a000000) == 0x28000000)
	{
		// return Mask(LoadStorePairLBit) == 0
		return (bits & (1 << 22)) == 0;
	}

	switch (bits & 0xC4C00000)
	{
		case 0x00000000: // STRB_w
		case 0x40000000: // STRH_w
		case 0x80000000: // STR_w
		case 0xC0000000: // STR_x
		case 0x04000000: // STR_b
		case 0x44000000: // STR_h
		case 0x84000000: // STR_s
		case 0xC4000000: // STR_d
		case 0x04800000: // STR_q
			return true;

		default:
			return false;
	}
}

#endif // _M_ARM64

namespace PageFaultHandler
{
static void SignalHandler(int sig, siginfo_t* info, void* ctx);
} // namespace PageFaultHandler

void PageFaultHandler::SignalHandler(int sig, siginfo_t* info, void* ctx)
{
#if defined(__linux__)
	void* const exception_address = reinterpret_cast<void*>(info->si_addr);

#if defined(_M_X86)
	void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_RIP]);
	const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_ERR] & 2) != 0;
#elif defined(_M_ARM64)
	void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.pc);
	const bool is_write = IsStoreInstruction(exception_pc);
#endif

#elif defined(__FreeBSD__)

#if defined(_M_X86)
	void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_addr);
	void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_rip);
	const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_err & 2) != 0;
#elif defined(_M_ARM64)
	void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
	void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
	const bool is_write = IsStoreInstruction(exception_pc);
#endif
#endif

	// Executing the handler concurrently from multiple threads wouldn't go down well.
	s_exception_handler_mutex.lock();

	// Prevent recursive exception filtering.
	HandlerResult result = HandlerResult::ExecuteNextHandler;
	if (!s_in_exception_handler)
	{
		s_in_exception_handler = true;
		result = HandlePageFault(exception_pc, exception_address, is_write);
		s_in_exception_handler = false;
	}

	s_exception_handler_mutex.unlock();

	// Resumes execution right where we left off (re-executes instruction that caused the SIGSEGV).
	if (result == HandlerResult::ContinueExecution)
		return;

	// We couldn't handle it. Pass it off to the crash dumper.
	CrashHandler::CrashSignalHandler(sig, info, ctx);
}

bool PageFaultHandler::Install(Error* error)
{
	std::unique_lock lock(s_exception_handler_mutex);
	pxAssertRel(!s_installed, "Page fault handler has already been installed.");

	struct sigaction sa;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO | SA_NODEFER;
	sa.sa_sigaction = SignalHandler;
	if (sigaction(SIGSEGV, &sa, nullptr) != 0)
	{
		Error::SetErrno(error, "sigaction() for SIGSEGV failed: ", errno);
		return false;
	}

#ifdef _M_ARM64
	// We can get SIGBUS on ARM64.
	if (sigaction(SIGBUS, &sa, nullptr) != 0)
	{
		Error::SetErrno(error, "sigaction() for SIGBUS failed: ", errno);
		return false;
	}
#endif

	s_installed = true;
	return true;
}
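// Illustrative sketch (not compiled): the expected contract with the handler
// above. HandlePageFault() is supplied elsewhere (this file only calls it); a
// typical implementation re-protects the faulting page and returns
// ContinueExecution so the faulting instruction is retried, or
// ExecuteNextHandler to fall through to the crash dumper. IsTrackedPage() is a
// hypothetical stand-in for whatever dirty-page bookkeeping the caller does,
// and the PageProtectionMode setters are assumed as in the sketches above.
#if 0
namespace PageFaultHandler
{
HandlerResult HandlePageFault(void* exception_pc, void* fault_address, bool is_write)
{
	// A write hit a page we deliberately made read-only (e.g. to track dirty
	// pages). Re-enable writes and retry the faulting store.
	if (is_write && IsTrackedPage(fault_address))
	{
		void* const page = reinterpret_cast<void*>(
			reinterpret_cast<uintptr_t>(fault_address) & ~static_cast<uintptr_t>(__pagesize - 1));
		HostSys::MemProtect(page, __pagesize, PageProtectionMode().Read().Write());
		return HandlerResult::ContinueExecution;
	}

	// Anything else is a genuine crash; let CrashSignalHandler() take it.
	return HandlerResult::ExecuteNextHandler;
}
} // namespace PageFaultHandler
#endif
//
// Installation happens once at startup, roughly:
//   Error error;
//   if (!PageFaultHandler::Install(&error))
//     Console.Error(error.GetDescription());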