// pcsx2/common/Linux/LnxHostSys.cpp

// SPDX-FileCopyrightText: 2002-2024 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+
#include "common/Assertions.h"
#include "common/BitUtils.h"
#include "common/Console.h"
#include "common/CrashHandler.h"
#include "common/Error.h"
#include "common/HostSys.h"
#include <cstdio>
#include <csignal>
#include <cerrno>
#include <fcntl.h>
#include <mutex>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>
#include "fmt/core.h"
#if defined(__FreeBSD__)
#include "cpuinfo.h"
#endif
// FreeBSD does not have MAP_FIXED_NOREPLACE, but does have MAP_EXCL.
// MAP_FIXED combined with MAP_EXCL behaves like MAP_FIXED_NOREPLACE.
#if defined(__FreeBSD__) && !defined(MAP_FIXED_NOREPLACE)
#define MAP_FIXED_NOREPLACE (MAP_FIXED | MAP_EXCL)
#endif
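// Translates a PageProtectionMode into the PROT_* bits used by mmap()/mprotect().
// Note that executable pages are always made readable as well.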
static __ri uint LinuxProt(const PageProtectionMode& mode)
{
u32 lnxmode = 0;
if (mode.CanWrite())
lnxmode |= PROT_WRITE;
if (mode.CanRead())
lnxmode |= PROT_READ;
if (mode.CanExecute())
lnxmode |= PROT_EXEC | PROT_READ;
return lnxmode;
}
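// Reserves an anonymous private mapping. When a base address is requested,
// MAP_FIXED_NOREPLACE makes the call fail (returning nullptr) instead of
// silently replacing an existing mapping at that address.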
void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
if (mode.IsNone())
return nullptr;
const u32 prot = LinuxProt(mode);
u32 flags = MAP_PRIVATE | MAP_ANONYMOUS;
if (base)
flags |= MAP_FIXED_NOREPLACE;
void* res = mmap(base, size, prot, flags, -1, 0);
if (res == MAP_FAILED)
return nullptr;
return res;
}
void HostSys::Munmap(void* base, size_t size)
{
if (!base)
return;
munmap((void*)base, size);
}
void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
const u32 lnxmode = LinuxProt(mode);
const int result = mprotect(baseaddr, size, lnxmode);
if (result != 0)
pxFail("mprotect() failed");
}
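// Builds a per-process name for shm_open(), suffixed with the PID so that
// multiple running instances don't collide.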
std::string HostSys::GetFileMappingName(const char* prefix)
{
const unsigned pid = static_cast<unsigned>(getpid());
#if defined(__FreeBSD__)
// FreeBSD's shm_open(3) requires name to be absolute
return fmt::format("/tmp/{}_{}", prefix, pid);
#else
return fmt::format("{}_{}", prefix, pid);
#endif
}
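// Creates an anonymous POSIX shared memory object and returns its file
// descriptor packed into a void* handle; the name is unlinked immediately, so
// the fd is the only thing keeping the object alive. A hypothetical round trip
// through this API (PageAccess::ReadWrite() is illustrative; see
// common/HostSys.h for the real protection helpers) might look like:
//
//   const std::string name = HostSys::GetFileMappingName("pcsx2");
//   void* handle = HostSys::CreateSharedMemory(name.c_str(), size);
//   void* view = HostSys::MapSharedMemory(handle, 0, nullptr, size, PageAccess::ReadWrite());
//   // ... use view ...
//   HostSys::UnmapSharedMemory(view, size);
//   HostSys::DestroySharedMemory(handle);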
void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
if (fd < 0)
{
std::fprintf(stderr, "shm_open failed: %d\n", errno);
return nullptr;
}
// we're not going to be opening this mapping in other processes, so remove the file
shm_unlink(name);
// ensure it's the correct size
if (ftruncate(fd, static_cast<off_t>(size)) < 0)
{
std::fprintf(stderr, "ftruncate(%zu) failed: %d\n", size, errno);
close(fd); // don't leak the descriptor if resizing fails
return nullptr;
}
return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}
void HostSys::DestroySharedMemory(void* ptr)
{
close(static_cast<int>(reinterpret_cast<intptr_t>(ptr)));
}
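// Maps a view of the shared-memory handle. As with Mmap(), a requested base
// address is honoured only if the range is currently unmapped
// (MAP_FIXED_NOREPLACE); otherwise the call fails and nullptr is returned.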
void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
const uint lnxmode = LinuxProt(mode);
const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED_NOREPLACE) : MAP_SHARED;
void* ptr = mmap(baseaddr, size, lnxmode, flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)), static_cast<off_t>(offset));
if (ptr == MAP_FAILED)
return nullptr;
return ptr;
}
void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
if (munmap(baseaddr, size) != 0)
pxFailRel("Failed to unmap shared memory");
}
size_t HostSys::GetRuntimePageSize()
{
int res = sysconf(_SC_PAGESIZE);
return (res > 0) ? static_cast<size_t>(res) : 0;
}
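// Returns the largest L1 cache line size visible to the process. FreeBSD goes
// through cpuinfo; Linux takes the larger of the sysconf() values and whatever
// the cpu0 sysfs coherency_line_size entries report, since sysconf() can
// return 0 on some platforms.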
size_t HostSys::GetRuntimeCacheLineSize()
{
#if defined(__FreeBSD__)
if (!cpuinfo_initialize())
return 0;
u32 max_line_size = 0;
for (u32 i = 0; i < cpuinfo_get_processors_count(); i++)
{
// Cache descriptors can be missing on some systems; skip processors without L1 info.
const cpuinfo_processor* proc = cpuinfo_get_processor(i);
if (!proc || !proc->cache.l1i || !proc->cache.l1d)
continue;
const u32 l1i = proc->cache.l1i->line_size;
const u32 l1d = proc->cache.l1d->line_size;
const u32 res = std::max<u32>(l1i, l1d);
max_line_size = std::max<u32>(max_line_size, res);
}
return static_cast<size_t>(max_line_size);
#else
int l1i = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
int l1d = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
int res = (l1i > l1d) ? l1i : l1d;
for (int index = 0; index < 16; index++)
{
char buf[128];
snprintf(buf, sizeof(buf), "/sys/devices/system/cpu/cpu0/cache/index%d/coherency_line_size", index);
std::FILE* fp = std::fopen(buf, "rb");
if (!fp)
break;
// Null-terminate what was read before handing it to atoi().
const size_t len = std::fread(buf, 1, sizeof(buf) - 1, fp);
std::fclose(fp);
buf[len] = 0;
int val = std::atoi(buf);
res = (val > res) ? val : res;
}
return (res > 0) ? static_cast<size_t>(res) : 0;
#endif
}
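// SharedMemoryMappingArea reserves one contiguous PROT_NONE region up front and
// then maps file-backed views into it with MAP_FIXED, so the address range can
// never be taken over by an unrelated allocation. A rough usage sketch (the
// variables here are placeholders):
//
//   auto area = SharedMemoryMappingArea::Create(total_size);
//   u8* view = area->Map(shm_handle, 0, base_inside_area, view_size, mode);
//   // ... use view ...
//   area->Unmap(view, view_size);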
SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
: m_base_ptr(base_ptr)
, m_size(size)
, m_num_pages(num_pages)
{
}
SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
pxAssertRel(m_num_mappings == 0, "No mappings left");
if (munmap(m_base_ptr, m_size) != 0)
pxFailRel("Failed to release shared memory area");
}
std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
{
pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");
void* alloc = mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (alloc == MAP_FAILED)
return nullptr;
return std::unique_ptr<SharedMemoryMappingArea>(new SharedMemoryMappingArea(static_cast<u8*>(alloc), size, size / __pagesize));
}
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
{
pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
// MAP_FIXED is okay here, since we've reserved the entire region, and *want* to overwrite the mapping.
const uint lnxmode = LinuxProt(mode);
void* const ptr = mmap(map_base, map_size, lnxmode, MAP_SHARED | MAP_FIXED,
static_cast<int>(reinterpret_cast<intptr_t>(file_handle)), static_cast<off_t>(file_offset));
if (ptr == MAP_FAILED)
return nullptr;
m_num_mappings++;
return static_cast<u8*>(ptr);
}
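// Unmapping replaces the range with a fresh PROT_NONE anonymous mapping rather
// than munmap()ing it, so the reservation stays intact for later Map() calls.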
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
if (mmap(map_base, map_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
return false;
m_num_mappings--;
return true;
}
namespace PageFaultHandler
{
static std::recursive_mutex s_exception_handler_mutex;
static bool s_in_exception_handler = false;
static bool s_installed = false;
} // namespace PageFaultHandler
#ifdef _M_ARM64
void HostSys::FlushInstructionCache(void* address, u32 size)
{
__builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}
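// Decodes the 32-bit AArch64 instruction at the faulting PC to decide whether
// the access was a store, since the ARM64 signal context (unlike the x86 error
// code) does not carry a read/write flag. The masks mirror vixl's
// Instruction::IsStore().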
[[maybe_unused]] static bool IsStoreInstruction(const void* ptr)
{
u32 bits;
std::memcpy(&bits, ptr, sizeof(bits));
// Based on vixl's disassembler Instruction::IsStore().
// if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed)
if ((bits & 0x0a000000) != 0x08000000)
return false;
// if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed)
if ((bits & 0x3a000000) == 0x28000000)
{
// return Mask(LoadStorePairLBit) == 0
return (bits & (1 << 22)) == 0;
}
switch (bits & 0xC4C00000)
{
case 0x00000000: // STRB_w
case 0x40000000: // STRH_w
case 0x80000000: // STR_w
case 0xC0000000: // STR_x
case 0x04000000: // STR_b
case 0x44000000: // STR_h
case 0x84000000: // STR_s
case 0xC4000000: // STR_d
case 0x04800000: // STR_q
return true;
default:
return false;
}
}
#endif // _M_ARM64
namespace PageFaultHandler
{
static void SignalHandler(int sig, siginfo_t* info, void* ctx);
} // namespace PageFaultHandler
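// Extracts the fault address, faulting PC and access direction from the
// OS/architecture-specific signal context, then gives HandlePageFault() a
// chance to resolve the fault. Returning from the handler re-executes the
// faulting instruction; anything unhandled is forwarded to the crash dumper.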
void PageFaultHandler::SignalHandler(int sig, siginfo_t* info, void* ctx)
{
#if defined(__linux__)
void* const exception_address = reinterpret_cast<void*>(info->si_addr);
#if defined(_M_X86)
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_RIP]);
const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_ERR] & 2) != 0;
#elif defined(_M_ARM64)
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.pc);
const bool is_write = IsStoreInstruction(exception_pc);
#endif
#elif defined(__FreeBSD__)
#if defined(_M_X86)
void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_addr);
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_rip);
const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_err & 2) != 0;
#elif defined(_M_ARM64)
void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
const bool is_write = IsStoreInstruction(exception_pc);
#endif
#endif
// Executing the handler concurrently from multiple threads wouldn't go down well.
s_exception_handler_mutex.lock();
// Prevent recursive exception filtering.
HandlerResult result = HandlerResult::ExecuteNextHandler;
if (!s_in_exception_handler)
{
s_in_exception_handler = true;
result = HandlePageFault(exception_pc, exception_address, is_write);
s_in_exception_handler = false;
}
s_exception_handler_mutex.unlock();
// Resumes execution right where we left off (re-executes instruction that caused the SIGSEGV).
if (result == HandlerResult::ContinueExecution)
return;
// We couldn't handle it. Pass it off to the crash dumper.
CrashHandler::CrashSignalHandler(sig, info, ctx);
}
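// Installs the handler for SIGSEGV (and SIGBUS, which ARM64 kernels can raise
// for some faulting accesses). SA_SIGINFO provides the siginfo_t/ucontext data
// the handler needs, and SA_NODEFER leaves the signal unblocked while the
// handler runs; recursion is guarded by s_in_exception_handler instead.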
bool PageFaultHandler::Install(Error* error)
{
std::unique_lock lock(s_exception_handler_mutex);
pxAssertRel(!s_installed, "Page fault handler has already been installed.");
struct sigaction sa;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_SIGINFO | SA_NODEFER;
sa.sa_sigaction = SignalHandler;
if (sigaction(SIGSEGV, &sa, nullptr) != 0)
{
Error::SetErrno(error, "sigaction() for SIGSEGV failed: ", errno);
return false;
}
#ifdef _M_ARM64
// We can get SIGBUS on ARM64.
if (sigaction(SIGBUS, &sa, nullptr) != 0)
{
Error::SetErrno(error, "sigaction() for SIGBUS failed: ", errno);
return false;
}
#endif
s_installed = true;
return true;
}