libretro: backport libnx support. Fix log source paths

commit 673c2988d6
parent b300bb9f7a

@@ -132,9 +132,17 @@ extern int pico_sem_wait(void *sem, int timeout);
 extern void *pico_thread_create(void *(*routine)(void *), void *arg);
 #endif /* PICO_SUPPORT_THREADING */
 
+#ifdef HAVE_LIBNX
+#include <switch.h>
+#endif
+
 static inline void PICO_IDLE(void)
 {
+#ifdef HAVE_LIBNX
+	svcSleepThread(5000000);
+#else
 	usleep(5000);
+#endif // HAVE_LIBNX
 }
 
 #endif /* PICO_SUPPORT_POSIX */

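The two branches above sleep for the same 5 ms: svcSleepThread() takes nanoseconds, while usleep() takes microseconds. A minimal sketch of a shared helper built on that fact (the sleep_us name is illustrative, not part of the commit):

#ifdef HAVE_LIBNX
#include <switch.h>   // svcSleepThread(s64 nanoseconds)
#else
#include <unistd.h>   // usleep(useconds_t microseconds)
#endif
#include <cstdint>

// Illustrative helper: sleep for `us` microseconds on either platform.
static inline void sleep_us(uint64_t us)
{
#ifdef HAVE_LIBNX
	svcSleepThread((int64_t)us * 1000);   // convert to nanoseconds
#else
	usleep(us);
#endif
}
// sleep_us(5000) reproduces the 5 ms idle used by PICO_IDLE().
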
@@ -51,7 +51,7 @@ void (*EntryPoints[ARAM_SIZE_MAX / 4])();
 
 #ifdef _WIN32
 alignas(4096) static u8 ARM7_TCB[ICacheSize];
-#elif defined(__unix__)
+#elif defined(__unix__) || defined(HAVE_LIBNX)
 alignas(4096) static u8 ARM7_TCB[ICacheSize] __attribute__((section(".text")));
 #elif defined(__APPLE__)
 alignas(4096) static u8 ARM7_TCB[ICacheSize] __attribute__((section("__TEXT, .text")));

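Hedged note on why libnx joins the __unix__ branch here: Switch homebrew cannot normally allocate fresh executable pages, so the recompiler's translation cache is declared inside the module's own .text section and written through a read-write alias later (see the new vmem file below). The same declaration pattern, with illustrative names:

#include <cstddef>
#include <cstdint>

constexpr size_t kCacheSize = 1024 * 1024;   // illustrative size

#if defined(_WIN32)
alignas(4096) static uint8_t RecCache[kCacheSize];   // made executable later, e.g. via VirtualProtect
#elif defined(__unix__) || defined(HAVE_LIBNX)
alignas(4096) static uint8_t RecCache[kCacheSize] __attribute__((section(".text")));   // linked into the executable segment
#elif defined(__APPLE__)
alignas(4096) static uint8_t RecCache[kCacheSize] __attribute__((section("__TEXT, .text")));
#endif
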
@@ -3,6 +3,9 @@
 #include "hw/pvr/pvr_mem.h"
 #include "hw/sh4/dyna/blockmanager.h"
 #include "hw/sh4/sh4_mem.h"
+#if defined(HAVE_LIBNX)
+#include <malloc.h>
+#endif
 
 #define HANDLER_MAX 0x1F
 #define HANDLER_COUNT (HANDLER_MAX+1)

@@ -367,6 +370,8 @@ static void* malloc_pages(size_t size) {
 	return _aligned_malloc(size, PAGE_SIZE);
 #elif defined(_ISOC11_SOURCE)
 	return aligned_alloc(PAGE_SIZE, size);
+#elif defined(HAVE_LIBNX)
+	return memalign(PAGE_SIZE, size);
 #else
 	void *data;
 	if (posix_memalign(&data, PAGE_SIZE, size) != 0)

@@ -439,7 +444,7 @@ bool _vmem_reserve()
 	vmemstatus = MemTypeError;
 
 	// Use vmem only if settings mandate so, and if we have proper exception handlers.
-#ifndef TARGET_NO_EXCEPTIONS
+#if !defined(TARGET_NO_EXCEPTIONS) && !defined(HAVE_LIBNX)
 	if (!settings.dynarec.disable_nvmem)
 		vmemstatus = vmem_platform_init((void**)&virt_ram_base, (void**)&p_sh4rcb);
 #endif

@@ -1,6 +1,9 @@
 #include "ta_ctx.h"
 #include "spg.h"
 #include "cfg/option.h"
+#if defined(HAVE_LIBNX)
+#include <malloc.h>
+#endif
 
 extern u32 fskip;
 extern u32 FrameCount;

@@ -19,6 +22,8 @@ void* OS_aligned_malloc(size_t align, size_t size)
 	return __mingw_aligned_malloc(size, align);
 #elif defined(_WIN32)
 	return _aligned_malloc(size, align);
+#elif defined(HAVE_LIBNX)
+	return memalign(align, size);
 #else
 	void *result;
 	if (posix_memalign(&result, align, size))

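One caveat the hunk implies but does not show: memory from memalign() and posix_memalign() is released with plain free(), whereas the two Windows allocators need their own free calls, so the matching deallocation has to switch on the same conditions. A hedged sketch (the __MINGW32__ guard and the function name are assumptions, not taken from the diff):

#include <cstdlib>
#if defined(_WIN32) || defined(HAVE_LIBNX)
#include <malloc.h>
#endif

// Sketch of the counterpart to OS_aligned_malloc(); the project presumably ships its own.
static void OS_aligned_free_sketch(void *ptr)
{
#if defined(__MINGW32__)
	__mingw_aligned_free(ptr);
#elif defined(_WIN32)
	_aligned_free(ptr);
#else
	free(ptr);   // memalign()/posix_memalign() memory is free()-able
#endif
}
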
@@ -23,7 +23,7 @@
 u8 SH4_TCB[CODE_SIZE + TEMP_CODE_SIZE + 4096]
 #if defined(_WIN32) || FEAT_SHREC != DYNAREC_JIT
 	;
-#elif defined(__unix__)
+#elif defined(__unix__) || defined(HAVE_LIBNX)
 	__attribute__((section(".text")));
 #elif defined(__APPLE__)
 	__attribute__((section("__TEXT,.text")));

@@ -1,6 +1,6 @@
 #include "types.h"
 
-#if defined(__unix__) || defined(__APPLE__)
+#if defined(__unix__) || defined(__APPLE__) || defined(HAVE_LIBNX)
 #if defined(__APPLE__)
 	#define _XOPEN_SOURCE 1
 	#define __USE_GNU 1

@@ -19,6 +19,10 @@
 
 #include "hw/sh4/dyna/ngen.h"
 
+#ifdef HAVE_LIBNX
+extern "C" char __start__;
+#endif // HAVE_LIBNX
+
 #if !defined(TARGET_NO_EXCEPTIONS)
 bool VramLockedWrite(u8* address);
 bool BM_LockedWrite(u8* address);

@@ -68,6 +72,12 @@ void fault_handler (int sn, siginfo_t * si, void *segfault_ctx)
 	}
 #endif
 	ERROR_LOG(COMMON, "SIGSEGV @ %p -> %p was not in vram, dynacode:%d", (void *)ctx.pc, si->si_addr, dyna_cde);
+#ifdef HAVE_LIBNX
+	MemoryInfo meminfo;
+	u32 pageinfo;
+	svcQueryMemory(&meminfo, &pageinfo, (u64)&__start__);
+	ERROR_LOG(COMMON, ".text base: %p", meminfo.addr);
+#endif // HAVE_LIBNX
 	die("segfault");
 	signal(SIGSEGV, SIG_DFL);
 }

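The extra libnx logging exists so a crash address can be related to the module load base: svcQueryMemory() on &__start__ (provided by the libnx runtime) returns the region the homebrew was loaded at, and pc minus that base is the offset to look up in the .elf with addr2line. A hedged sketch of that translation (the helper name is illustrative):

#ifdef HAVE_LIBNX
#include <switch.h>
#include <cstdint>

extern "C" char __start__;   // first byte of the module, provided by the libnx runtime

// Illustrative: convert an absolute crash PC into a module-relative offset.
static uintptr_t pc_to_module_offset(uintptr_t pc)
{
	MemoryInfo meminfo = {};
	u32 pageinfo = 0;
	svcQueryMemory(&meminfo, &pageinfo, (u64)&__start__);
	return pc - (uintptr_t)meminfo.addr;   // feed this offset to addr2line/nm on the .elf
}
#endif // HAVE_LIBNX
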
@@ -75,12 +85,14 @@ void fault_handler (int sn, siginfo_t * si, void *segfault_ctx)
 
 void install_fault_handler()
 {
+#ifndef HAVE_LIBNX
 	struct sigaction act, segv_oact;
 	memset(&act, 0, sizeof(act));
 	act.sa_sigaction = fault_handler;
 	sigemptyset(&act.sa_mask);
 	act.sa_flags = SA_SIGINFO;
 	sigaction(SIGSEGV, &act, &segv_oact);
+#endif
 #if defined(__APPLE__)
 	//this is broken on osx/ios/mach in general
 	sigaction(SIGBUS, &act, &segv_oact);

@@ -0,0 +1,300 @@
+#if 1// defined(HAVE_LIBNX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include "hw/mem/_vmem.h"
+#include "stdclass.h"
+
+#include <switch.h>
+#include <malloc.h>
+
+using mem_handle_t = uintptr_t;
+static mem_handle_t vmem_fd = -1;
+static mem_handle_t vmem_fd_page = -1;
+static mem_handle_t vmem_fd_codememory = -1;
+
+static void *reserved_base;
+static size_t reserved_size;
+
+bool mem_region_lock(void *start, size_t len)
+{
+	size_t inpage = (uintptr_t)start & PAGE_MASK;
+	len += inpage;
+	size_t inlen = len & PAGE_MASK;
+	if (inlen)
+		len = (len + PAGE_SIZE) & ~(PAGE_SIZE-1);
+
+	Result rc;
+	uintptr_t start_addr = (uintptr_t)start - inpage;
+	for (uintptr_t addr = start_addr; addr < (start_addr + len); addr += PAGE_SIZE)
+	{
+		rc = svcSetMemoryPermission((void*)addr, PAGE_SIZE, Perm_R);
+		if (R_FAILED(rc))
+			WARN_LOG(VMEM, "Failed to SetPerm Perm_R on %p len 0x%x rc 0x%x", (void*)addr, PAGE_SIZE, rc);
+	}
+
+	return true;
+}
+
+bool mem_region_unlock(void *start, size_t len)
+{
+	size_t inpage = (uintptr_t)start & PAGE_MASK;
+	len += inpage;
+	size_t inlen = len & PAGE_MASK;
+	if(inlen)
+		len = (len + PAGE_SIZE) & ~(PAGE_SIZE-1);
+
+	Result rc;
+	uintptr_t start_addr = (uintptr_t)start - inpage;
+	for (uintptr_t addr = start_addr; addr < (start_addr + len); addr += PAGE_SIZE)
+	{
+		rc = svcSetMemoryPermission((void*)addr, PAGE_SIZE, Perm_Rw);
+		if (R_FAILED(rc))
+			WARN_LOG(VMEM, "Failed to SetPerm Perm_Rw on %p len 0x%x rc 0x%x", (void*)addr, PAGE_SIZE, rc);
+	}
+
+	return true;
+}
+
+bool mem_region_set_exec(void *start, size_t len)
+{
+	size_t inpage = (uintptr_t)start & PAGE_MASK;
+
+	svcSetMemoryPermission((void*)((uintptr_t)start - inpage), len + inpage, Perm_R); // *shrugs*
+
+	return true;
+}
+
+void *mem_region_reserve(void *start, size_t len)
+{
+	return virtmemReserve(len);
+}
+
+bool mem_region_release(void *start, size_t len)
+{
+	return true;
+}
+
+void *mem_region_map_file(void *file_handle, void *dest, size_t len, size_t offset, bool readwrite)
+{
+	Result rc = svcMapProcessMemory(dest, envGetOwnProcessHandle(), (u64)(vmem_fd_codememory + offset), len);
+	if (R_FAILED(rc))
+		WARN_LOG(VMEM, "Fatal error creating the view... base: %p offset: 0x%x size: 0x%x src: %p err: 0x%x", vmem_fd, offset, len, vmem_fd_codememory + offset, rc);
+	else
+		INFO_LOG(VMEM, "Created the view... base: %p offset: 0x%x size: 0x%x src: %p err: 0x%x", vmem_fd, offset, len, vmem_fd_codememory + offset, rc);
+
+	return dest;
+}
+
+bool mem_region_unmap_file(void *start, size_t len)
+{
+	return mem_region_release(start, len);
+}
+
+// Allocates memory via a fd on shmem/ahmem or even a file on disk
+static mem_handle_t allocate_shared_filemem(unsigned size)
+{
+	int fd = -1;
+	void* mem = memalign(0x1000, size);
+	return (uintptr_t)mem;
+
+	return fd;
+}
+
+// Implement vmem initialization for RAM, ARAM, VRAM and SH4 context, fpcb etc.
+// The function supports allocating 512MB or 4GB addr spaces.
+// vmem_base_addr points to an address space of 512MB (or 4GB) that can be used for fast memory ops.
+// In negative offsets of the pointer (up to FPCB size, usually 65/129MB) the context and jump table
+// can be found. If the platform init returns error, the user is responsible for initializing the
+// memory using a fallback (that is, regular mallocs and falling back to slow memory JIT).
+VMemType vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr)
+{
+	const unsigned size_aligned = ((RAM_SIZE_MAX + VRAM_SIZE_MAX + ARAM_SIZE_MAX + PAGE_SIZE) & (~(PAGE_SIZE-1)));
+	vmem_fd_page = allocate_shared_filemem(size_aligned);
+	if (vmem_fd_page < 0)
+		return MemTypeError;
+
+	vmem_fd_codememory = (uintptr_t)virtmemReserve(size_aligned);
+
+	if (R_FAILED(svcMapProcessCodeMemory(envGetOwnProcessHandle(), (u64) vmem_fd_codememory, (u64) vmem_fd_page, size_aligned)))
+		WARN_LOG(VMEM, "Failed to Map memory (platform_int)...");
+
+	if (R_FAILED(svcSetProcessMemoryPermission(envGetOwnProcessHandle(), vmem_fd_codememory, size_aligned, Perm_Rx)))
+		WARN_LOG(VMEM, "Failed to set perms (platform_int)...");
+
+	// Now try to allocate a contiguous piece of memory.
+	VMemType rv;
+	if (reserved_base == NULL)
+	{
+		reserved_size = 512*1024*1024 + sizeof(Sh4RCB) + ARAM_SIZE_MAX + 0x10000;
+		reserved_base = mem_region_reserve(NULL, reserved_size);
+		if (!reserved_base)
+			return MemTypeError;
+
+		rv = MemType512MB;
+	}
+
+	*sh4rcb_addr = reserved_base;
+	*vmem_base_addr = (char *)reserved_base + sizeof(Sh4RCB);
+	const size_t fpcb_size = sizeof(((Sh4RCB *)NULL)->fpcb);
+	void *sh4rcb_base_ptr = (char *)reserved_base + fpcb_size;
+
+	// Now map the memory for the SH4 context, do not include FPCB on purpose (paged on demand).
+	mem_region_unlock(sh4rcb_base_ptr, sizeof(Sh4RCB) - fpcb_size);
+
+	return rv;
+}
+
+// Just tries to wipe as much as possible in the relevant area.
+void vmem_platform_destroy()
+{
+	if (reserved_base != NULL)
+		mem_region_release(reserved_base, reserved_size);
+}
+
+// Resets a chunk of memory by deleting its data and setting its protection back.
+void vmem_platform_reset_mem(void *ptr, unsigned size_bytes) {
+	svcSetMemoryPermission(ptr, size_bytes, Perm_None);
+}
+
+// Allocates a bunch of memory (page aligned and page-sized)
+void vmem_platform_ondemand_page(void *address, unsigned size_bytes) {
+	verify(mem_region_unlock(address, size_bytes));
+}
+
+// Creates mappings to the underlying file including mirroring sections
+void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned nummaps)
+{
+	for (unsigned i = 0; i < nummaps; i++) {
+		// Ignore unmapped stuff, it is already reserved as PROT_NONE
+		if (!vmem_maps[i].memsize)
+			continue;
+
+		// Calculate the number of mirrors
+		u64 address_range_size = vmem_maps[i].end_address - vmem_maps[i].start_address;
+		unsigned num_mirrors = (address_range_size) / vmem_maps[i].memsize;
+		verify((address_range_size % vmem_maps[i].memsize) == 0 && num_mirrors >= 1);
+
+		for (unsigned j = 0; j < num_mirrors; j++) {
+			u64 offset = vmem_maps[i].start_address + j * vmem_maps[i].memsize;
+			verify(mem_region_unmap_file(&virt_ram_base[offset], vmem_maps[i].memsize));
+			verify(mem_region_map_file((void*)(uintptr_t)vmem_fd, &virt_ram_base[offset],
+					vmem_maps[i].memsize, vmem_maps[i].memoffset, vmem_maps[i].allow_writes) != NULL);
+		}
+	}
+}
+
+// Prepares the code region for JIT operations, thus marking it as RWX
+bool vmem_platform_prepare_jit_block(void *code_area, unsigned size, void **code_area_rwx)
+{
+	die("Not supported in libnx");
+
+	return false;
+}
+
+// Use two addr spaces: need to remap something twice, therefore use allocate_shared_filemem()
+bool vmem_platform_prepare_jit_block(void *code_area, unsigned size, void **code_area_rw, uintptr_t *rx_offset)
+{
+	const unsigned size_aligned = ((size + PAGE_SIZE) & (~(PAGE_SIZE-1)));
+	void *ptr_rx = code_area;
+
+	if (ptr_rx != code_area)
+		return false;
+
+	void* ptr_rw = virtmemReserve(size_aligned);
+	if (R_FAILED(svcMapProcessMemory(ptr_rw, envGetOwnProcessHandle(), (u64)code_area, size_aligned)))
+		WARN_LOG(DYNAREC, "Failed to map jit rw block...");
+
+	*code_area_rw = ptr_rw;
+	*rx_offset = (char*)ptr_rx - (char*)ptr_rw;
+	INFO_LOG(DYNAREC, "Info: Using NO_RWX mode, rx ptr: %p, rw ptr: %p, offset: %lu\n", ptr_rx, ptr_rw, (unsigned long)*rx_offset);
+
+	return (ptr_rw != MAP_FAILED);
+}
+
+#ifndef TARGET_NO_EXCEPTIONS
+extern "C"
+{
+
+alignas(16) u8 __nx_exception_stack[0x1000];
+u64 __nx_exception_stack_size = sizeof(__nx_exception_stack);
+
+void context_switch_aarch64(void* context);
+void fault_handler(int sn, siginfo_t * si, void *segfault_ctx);
+
+void __libnx_exception_handler(ThreadExceptionDump *ctx)
+{
+	mcontext_t m_ctx;
+
+	m_ctx.pc = ctx->pc.x;
+
+	for(int i=0; i<29; i++)
+	{
+		// printf("X%d: %p\n", i, ctx->cpu_gprs[i].x);
+		m_ctx.regs[i] = ctx->cpu_gprs[i].x;
+	}
+
+	/*
+	printf("PC: %p\n", ctx->pc.x);
+	printf("FP: %p\n", ctx->fp.x);
+	printf("LR: %p\n", ctx->lr.x);
+	printf("SP: %p\n", ctx->sp.x);
+	*/
+
+	ucontext_t u_ctx;
+	u_ctx.uc_mcontext = m_ctx;
+
+	siginfo_t sig_info;
+
+	sig_info.si_addr = (void*)ctx->far.x;
+
+	signal_handler(0, &sig_info, (void*) &u_ctx);
+
+	uint64_t handle[64] = { 0 };
+
+	uint64_t *ptr = (uint64_t*)handle;
+	ptr[0] = m_ctx.regs[0]; /* x0 0 */
+	ptr[1] = m_ctx.regs[1]; /* x1 8 */
+	ptr[2] = m_ctx.regs[2]; /* x2 16 */
+	ptr[3] = m_ctx.regs[3]; /* x3 24 */
+	ptr[4] = m_ctx.regs[4]; /* x4 32 */
+	ptr[5] = m_ctx.regs[5]; /* x5 40 */
+	ptr[6] = m_ctx.regs[6]; /* x6 48 */
+	ptr[7] = m_ctx.regs[7]; /* x7 56 */
+	/* Non-volatiles. */
+	ptr[8] = m_ctx.regs[8]; /* x8 64 */
+	ptr[9] = m_ctx.regs[9]; /* x9 72 */
+	ptr[10] = m_ctx.regs[10]; /* x10 80 */
+	ptr[11] = m_ctx.regs[11]; /* x11 88 */
+	ptr[12] = m_ctx.regs[12]; /* x12 96 */
+	ptr[13] = m_ctx.regs[13]; /* x13 104 */
+	ptr[14] = m_ctx.regs[14]; /* x14 112 */
+	ptr[15] = m_ctx.regs[15]; /* x15 120 */
+	ptr[16] = m_ctx.regs[16]; /* x16 128 */
+	ptr[17] = m_ctx.regs[17]; /* x17 136 */
+	ptr[18] = m_ctx.regs[18]; /* x18 144 */
+	ptr[19] = m_ctx.regs[19]; /* x19 152 */
+	ptr[20] = m_ctx.regs[20]; /* x20 160 */
+	ptr[21] = m_ctx.regs[21]; /* x21 168 */
+	ptr[22] = m_ctx.regs[22]; /* x22 176 */
+	ptr[23] = m_ctx.regs[23]; /* x23 184 */
+	ptr[24] = m_ctx.regs[24]; /* x24 192 */
+	ptr[25] = m_ctx.regs[25]; /* x25 200 */
+	ptr[26] = m_ctx.regs[26]; /* x26 208 */
+	ptr[27] = m_ctx.regs[27]; /* x27 216 */
+	ptr[28] = m_ctx.regs[28]; /* x28 224 */
+	/* Special regs */
+	ptr[29] = ctx->fp.x; /* frame pointer 232 */
+	ptr[30] = ctx->lr.x; /* link register 240 */
+	ptr[31] = ctx->sp.x; /* stack pointer 248 */
+	ptr[32] = (uintptr_t)ctx->pc.x; /* PC 256 */
+
+	context_switch_aarch64(ptr);
+}
+}
+#endif // TARGET_NO_EXCEPTIONS
+#endif // HAVE_LIBNX

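The second vmem_platform_prepare_jit_block() overload above is the core of the no-RWX scheme: the code area already sits in an executable (RX) mapping, and svcMapProcessMemory() creates a second, writable view of the same pages, so the emitter writes at the RW address and executes the result at RW address plus rx_offset. A hedged usage sketch (the struct and function names are illustrative, not the emulator's API):

#include <cstdint>
#include <cstring>

struct JitBlock {
	uint8_t  *rw;        // write code here (the svcMapProcessMemory alias)
	uintptr_t rx_offset; // add this to reach the executable address
};

using JitFn = int (*)();

static JitFn emit_and_get_entry(JitBlock &blk, const uint8_t *code, size_t len)
{
	std::memcpy(blk.rw, code, len);   // emit through the RW view
	// A real AArch64 emitter would also flush the data/instruction caches here
	// (e.g. __builtin___clear_cache) before running the freshly written code.
	return (JitFn)(uintptr_t)(blk.rw + blk.rx_offset);   // run from the RX view
}
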
@@ -11,7 +11,20 @@ constexpr u32 CODEPAGE_WINDOWS_1252 = 1252;
 #include <locale.h>
 #endif
 
-#if !defined(_WIN32) && !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__)
+#if defined(HAVE_LIBNX) || defined(__HAIKU__)
+int vasprintf(char **s, const char *fmt, va_list ap)
+{
+	va_list ap2;
+	va_copy(ap2, ap);
+	int l = vsnprintf(0, 0, fmt, ap2);
+	va_end(ap2);
+
+	if (l<0 || !(*s=malloc(l+1U))) return -1;
+	return vsnprintf(*s, l+1U, fmt, ap);
+}
+#endif
+
+#if !defined(_WIN32) && !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__) && !defined(HAVE_LIBNX)
 static locale_t GetCLocale()
 {
 	static locale_t c_locale = newlocale(LC_ALL_MASK, "C", nullptr);

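The vasprintf() fallback added above is only compiled where the C library lacks it (libnx's newlib, Haiku): it measures the formatted length with vsnprintf(NULL, 0, ...), allocates l+1 bytes, and formats again. A short hedged usage example (assumes a vasprintf declaration is visible, either the fallback above or the system one):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

// Illustrative: heap-allocate a formatted string; the caller free()s the result.
static char *format_alloc(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	char *out = nullptr;
	int n = vasprintf(&out, fmt, ap);
	va_end(ap);
	return n < 0 ? nullptr : out;
}
// Example: char *msg = format_alloc("frame %d took %.2f ms", 42, 16.6); ... free(msg);
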
@@ -56,11 +69,11 @@ bool CharArrayFromFormatV(char* out, int outsize, const char* format, va_list ar
 	writtenCount = vsnprintf(out, outsize, format, args);
 #endif
 #else
-#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__)
+#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__) && !defined(HAVE_LIBNX)
 	locale_t previousLocale = uselocale(GetCLocale());
 #endif
 	writtenCount = vsnprintf(out, outsize, format, args);
-#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__)
+#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__) && !defined(HAVE_LIBNX)
 	uselocale(previousLocale);
 #endif
 #endif

@@ -88,7 +101,7 @@ std::string StringFromFormatV(const char* format, va_list args)
 	std::string temp = buf;
 	delete[] buf;
 #else
-#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__)
+#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__) && !defined(HAVE_LIBNX)
 	locale_t previousLocale = uselocale(GetCLocale());
 #endif
 	if (vasprintf(&buf, format, args) < 0)

@@ -97,7 +110,7 @@ std::string StringFromFormatV(const char* format, va_list args)
 		buf = nullptr;
 	}
 
-#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__)
+#if !defined(__ANDROID__) && !defined(__HAIKU__) && !defined(__OpenBSD__) && !defined(HAVE_LIBNX)
 	uselocale(previousLocale);
 #endif
 

@@ -23,7 +23,14 @@
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <netinet/in.h>
+#ifdef HAVE_LIBNX
+#include <switch.h>
+#define INADDR_NONE 0xffffffff
+#define INET_ADDRSTRLEN sizeof(struct sockaddr_in)
+#define SOL_TCP 6 // Shrug
+#else
 #include <netinet/ip.h>
+#endif // HAVE_LIBNX
 #include <netinet/tcp.h>
 #include <arpa/inet.h>
 #include <netdb.h>

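SOL_TCP is hard-coded to 6 because that is IPPROTO_TCP, the value SOL_TCP aliases on Linux, and libnx's headers only provide the IPPROTO_* names; the code presumably uses it for per-socket TCP options. A hedged sketch of the typical call, using the portable fallback spelling:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP   // 6, the same value the hunk above hard-codes for libnx
#endif

// Illustrative: disable Nagle's algorithm on an already-connected TCP socket.
static bool set_tcp_nodelay(int sockfd)
{
	int one = 1;
	return setsockopt(sockfd, SOL_TCP, TCP_NODELAY, &one, sizeof(one)) == 0;
}
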
@@ -27,6 +27,9 @@
 #include <cstdint>
 #include <cstddef>
 #include <cstring>
+#ifdef HAVE_LIBNX
+#include <strings.h>
+#endif
 
 //basic types
 typedef int8_t s8;

@@ -30,9 +30,9 @@ void GenericLog(LogTypes::LOG_LEVELS level, LogTypes::LOG_TYPE type, const char*
 
 static size_t DeterminePathCutOffPoint()
 {
-	constexpr const char* pattern = "core/";
+	constexpr const char* pattern = "shell/";
 #ifdef _WIN32
-	constexpr const char* pattern2 = "core\\";
+	constexpr const char* pattern2 = "shell\\";
 #endif
 	std::string path = __FILE__;
 	std::transform(path.begin(), path.end(), path.begin(),

@@ -43,7 +43,7 @@ static size_t DeterminePathCutOffPoint()
 	pos = path.find(pattern2);
 #endif
 	if (pos != std::string::npos)
 		return pos + strlen(pattern);
-	return pos;
+	return 0;
 }
 

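These two hunks are the "fix log source paths" half of the commit: the cut-off is now anchored on the shell/ component of __FILE__ instead of core/, and a failed search returns 0 (keep the whole path) rather than std::string::npos, which would be an unusable offset for the caller that trims file names in log lines. A hedged sketch of how such a cut-off is typically applied (the helper name and the example path are illustrative):

#include <cstring>

// Illustrative: trim a __FILE__ string using the cut-off computed above.
static const char *trim_source_path(const char *file, size_t cutoff)
{
	return std::strlen(file) > cutoff ? file + cutoff : file;
}

// With pattern "shell/", "/home/user/emu/shell/linux/common.cpp" would be logged as
// "linux/common.cpp"; with a cut-off of 0 the full path is kept.
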
@@ -85,7 +85,6 @@ Option<int> MaxThreads("", 3);
 Option<int> AutoSkipFrame(CORE_OPTION_NAME "_auto_skip_frame", 0);
 Option<int> RenderResolution("", 480);
 Option<bool> VSync("", true);
-// TODO
 Option<bool> ThreadedRendering(CORE_OPTION_NAME "_threaded_rendering", true);
 Option<int> AnisotropicFiltering(CORE_OPTION_NAME "_anisotropic_filtering");
 Option<bool> PowerVR2Filter(CORE_OPTION_NAME "_pvr2_filtering");