mirror of https://github.com/PCSX2/pcsx2.git
System: Revamp memory allocation
Guest memory is now mapped into a shared memory/file mapping, for use with fastmem. 64-bit and 128-bit arguments are passed by register/value instead of by reference/address. LDL/LDR/SDL/SDR now use 64-bit GPRs instead of SSE.
parent 92ad6f3baf
commit 00bcb4cf02
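The key mechanism here is that the guest-memory arena is no longer a plain anonymous mmap/VirtualAlloc reserve: it is backed by a named shared-memory object (shm_open on POSIX, CreateFileMappingW on Windows), so the same physical pages can later be mapped at more than one virtual address, which is the aliasing that fastmem relies on. Below is a minimal stand-alone POSIX sketch of that backing scheme; the object name and size are illustrative, not taken from the commit.

	#include <sys/mman.h>
	#include <sys/types.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main()
	{
		const size_t size = 32 * 1024 * 1024; // hypothetical arena size

		// Create a named shared-memory object, then unlink it immediately:
		// the fd keeps the backing alive, but no other process can open it.
		int fd = shm_open("/pcsx2-example", O_CREAT | O_EXCL | O_RDWR, 0600);
		if (fd < 0)
			return 1;
		shm_unlink("/pcsx2-example");

		// Size the object; every view mapped from it sees these pages.
		if (ftruncate(fd, static_cast<off_t>(size)) < 0)
			return 1;

		// Map a view. Because the pages are file-backed (MAP_SHARED), further
		// views of the same object can be created at other addresses later.
		void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (base == MAP_FAILED)
			return 1;

		munmap(base, size);
		close(fd);
		return 0;
	}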
@@ -89,4 +89,11 @@ namespace Common
			value++;
		return value;
	}

	template <typename T>
	static constexpr T PageAlign(T size)
	{
		static_assert(Common::IsPow2(__pagesize), "Page size is a power of 2");
		return Common::AlignUpPow2(size, __pagesize);
	}
} // namespace Common
@@ -16,6 +16,8 @@
#pragma once

#include <atomic>
#include <map>
#include <memory>
#include <string>
#include "common/Pcsx2Defs.h"
@@ -121,35 +123,31 @@ static __fi PageProtectionMode PageAccess_Any()
// platform prior to wxWidgets .. it should prolly be removed -- air)
namespace HostSys
{
	void* MmapReserve(uptr base, size_t size);
	bool MmapCommit(uptr base, size_t size, const PageProtectionMode& mode);
	void MmapReset(uptr base, size_t size);

	void* MmapReservePtr(void* base, size_t size);
	bool MmapCommitPtr(void* base, size_t size, const PageProtectionMode& mode);
	void MmapResetPtr(void* base, size_t size);

	// Maps a block of memory for use as a recompiled code buffer.
	// Returns NULL on allocation failure.
	extern void* Mmap(uptr base, size_t size);
	extern void* Mmap(void* base, size_t size, const PageProtectionMode& mode);

	// Unmaps a block allocated by SysMmap
	extern void Munmap(uptr base, size_t size);
	extern void Munmap(void* base, size_t size);

	extern void MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode);

	extern void Munmap(void* base, size_t size);

	template <uint size>
	void MemProtectStatic(u8 (&arr)[size], const PageProtectionMode& mode)
	{
		MemProtect(arr, size, mode);
	}
} // namespace HostSys

	extern std::string GetFileMappingName(const char* prefix);
	extern void* CreateSharedMemory(const char* name, size_t size);
	extern void DestroySharedMemory(void* ptr);
	extern void* MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode);
	extern void UnmapSharedMemory(void* baseaddr, size_t size);
}

// Safe version of Munmap -- NULLs the pointer variable immediately after free'ing it.
#define SafeSysMunmap(ptr, size) \
	((void)(HostSys::Munmap((uptr)(ptr), size), (ptr) = 0))
	((void)(HostSys::Munmap(ptr, size), (ptr) = 0))

extern void InitCPUTicks();
extern u64 GetTickFrequency();
@@ -18,6 +18,7 @@
#include <sys/mman.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include "fmt/core.h"
@@ -96,14 +97,9 @@ void _platform_InstallSignalHandler()
#endif
}

// returns FALSE if the mprotect call fails with an ENOMEM.
// Raises assertions on other types of POSIX errors (since those typically reflect invalid object
// or memory states).
static bool _memprotect(void* baseaddr, size_t size, const PageProtectionMode& mode)
static __ri uint LinuxProt(const PageProtectionMode& mode)
{
	pxAssertDev((size & (__pagesize - 1)) == 0, "Size is page aligned");

	uint lnxmode = 0;
	u32 lnxmode = 0;

	if (mode.CanWrite())
		lnxmode |= PROT_WRITE;
@@ -112,109 +108,101 @@ static bool _memprotect(void* baseaddr, size_t size, const PageProtectionMode& m
	if (mode.CanExecute())
		lnxmode |= PROT_EXEC | PROT_READ;

	const int result = mprotect(baseaddr, size, lnxmode);

	if (result == 0)
		return true;

	switch (errno)
	{
		case EINVAL:
			pxFailDev(fmt::format("mprotect returned EINVAL @ 0x{:X} -> 0x{:X} (mode={})",
				baseaddr, (uptr)baseaddr + size, mode.ToString()).c_str());
			break;

		case EACCES:
			pxFailDev(fmt::format("mprotect returned EACCES @ 0x{:X} -> 0x{:X} (mode={})",
				baseaddr, (uptr)baseaddr + size, mode.ToString()).c_str());
			break;

		case ENOMEM:
			// caller handles assertion or exception, or whatever.
			break;
	}
	return false;
	return lnxmode;
}

void* HostSys::MmapReservePtr(void* base, size_t size)
void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
	pxAssertDev((size & (__pagesize - 1)) == 0, "Size is page aligned");

	// On linux a reserve-without-commit is performed by using mmap on a read-only
	// or anonymous source, with PROT_NONE (no-access) permission. Since the mapping
	// is completely inaccessible, the OS will simply reserve it and will not put it
	// against the commit table.
	void* result = mmap(base, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (result == MAP_FAILED)
		result = nullptr;

	return result;
}

bool HostSys::MmapCommitPtr(void* base, size_t size, const PageProtectionMode& mode)
{
	// In linux, reserved memory is automatically committed when its permissions are
	// changed to something other than PROT_NONE. If the user is committing memory
	// as PROT_NONE, then just ignore this call (memory will be committed automatically
	// later when the user changes permissions to something useful via calls to MemProtect).

	if (mode.IsNone())
		return false;
		return nullptr;

	if (_memprotect(base, size, mode))
		return true;
	const u32 prot = LinuxProt(mode);

	return false;
	u32 flags = MAP_PRIVATE | MAP_ANONYMOUS;
	if (base)
		flags |= MAP_FIXED;

	void* res = mmap(base, size, prot, flags, -1, 0);
	if (res == MAP_FAILED)
		return nullptr;

	return res;
}

void HostSys::MmapResetPtr(void* base, size_t size)
{
	pxAssertDev((size & (__pagesize - 1)) == 0, "Size is page aligned");

	void* result = mmap(base, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	pxAssertRel((uptr)result == (uptr)base, "Virtual memory decommit failed");
}

void* HostSys::MmapReserve(uptr base, size_t size)
{
	return MmapReservePtr((void*)base, size);
}

bool HostSys::MmapCommit(uptr base, size_t size, const PageProtectionMode& mode)
{
	return MmapCommitPtr((void*)base, size, mode);
}

void HostSys::MmapReset(uptr base, size_t size)
{
	MmapResetPtr((void*)base, size);
}

void* HostSys::Mmap(uptr base, size_t size)
{
	pxAssertDev((size & (__pagesize - 1)) == 0, "Size is page aligned");

	// MAP_ANONYMOUS - means we have no associated file handle (or device).

	return mmap((void*)base, size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

void HostSys::Munmap(uptr base, size_t size)
void HostSys::Munmap(void* base, size_t size)
{
	if (!base)
		return;

	munmap((void*)base, size);
}

void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	if (!_memprotect(baseaddr, size, mode))
	{
		throw Exception::OutOfMemory("MemProtect")
			.SetDiagMsg(fmt::format("mprotect failed @ 0x{:X} -> 0x{:X} (mode={})",
				baseaddr, (uptr)baseaddr + size, mode.ToString()));
	}
	pxAssertDev((size & (__pagesize - 1)) == 0, "Size is page aligned");

	const u32 lnxmode = LinuxProt(mode);

	const int result = mprotect(baseaddr, size, lnxmode);
	if (result != 0)
		pxFail("mprotect() failed");
}

std::string HostSys::GetFileMappingName(const char* prefix)
{
	const unsigned pid = static_cast<unsigned>(getpid());
	return fmt::format("{}_{}", prefix, pid);
}

void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
	const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
	if (fd < 0)
	{
		std::fprintf(stderr, "shm_open failed: %d\n", errno);
		return nullptr;
	}

	// we're not going to be opening this mapping in other processes, so remove the file
	shm_unlink(name);

	// ensure it's the correct size
#ifndef __APPLE__
	if (ftruncate64(fd, static_cast<off64_t>(size)) < 0)
#else
	if (ftruncate(fd, static_cast<off_t>(size)) < 0)
#endif
	{
		std::fprintf(stderr, "ftruncate64(%zu) failed: %d\n", size, errno);
		return nullptr;
	}

	return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}

void HostSys::DestroySharedMemory(void* ptr)
{
	close(static_cast<int>(reinterpret_cast<intptr_t>(ptr)));
}

void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	const uint lnxmode = LinuxProt(mode);

	const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED) : MAP_SHARED;
	void* ptr = mmap(baseaddr, size, lnxmode, flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)), static_cast<off_t>(offset));
	if (ptr == MAP_FAILED)
		return nullptr;

	return ptr;
}

void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
	if (mmap(baseaddr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
		pxFailRel("Failed to unmap shared memory");
}

#endif
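To see how the HostSys primitives above fit together, here is a hypothetical caller that maps the same backing twice, once as the guest view and once as an alias, the way a fastmem arena would. Only the function signatures come from this commit; the name, size, and minimal error handling are illustrative.

	static void ExampleDoubleMapping()
	{
		const size_t size = 16 * _1mb; // illustrative size
		const std::string name(HostSys::GetFileMappingName("pcsx2-example"));
		void* handle = HostSys::CreateSharedMemory(name.c_str(), size);
		if (!handle)
			return;

		// First view: backs the emulated guest memory.
		void* guest = HostSys::MapSharedMemory(handle, 0, nullptr, size, PageAccess_ReadWrite());

		// Second view of the same pages; a write through one pointer is
		// immediately visible through the other.
		void* alias = HostSys::MapSharedMemory(handle, 0, nullptr, size, PageAccess_ReadWrite());

		if (alias)
			HostSys::UnmapSharedMemory(alias, size);
		if (guest)
			HostSys::UnmapSharedMemory(guest, size);
		HostSys::DestroySharedMemory(handle);
	}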
@@ -138,7 +138,8 @@ class VirtualMemoryManager

	std::string m_name;

	uptr m_baseptr;
	void* m_file_handle;
	u8* m_baseptr;

	// An array to track page usage (to trigger asserts if things try to overlap)
	std::atomic<bool>* m_pageuse;

@@ -150,18 +151,21 @@ public:
	// If upper_bounds is nonzero and the OS fails to allocate memory that is below it,
	// calls to IsOk() will return false and Alloc() will always return null pointers
	// strict indicates that the allocation should quietly fail if the memory can't be mapped at `base`
	VirtualMemoryManager(std::string name, uptr base, size_t size, uptr upper_bounds = 0, bool strict = false);
	VirtualMemoryManager(std::string name, const char* file_mapping_name, uptr base, size_t size, uptr upper_bounds = 0, bool strict = false);
	~VirtualMemoryManager();

	void* GetBase() const { return (void*)m_baseptr; }
	bool IsSharedMemory() const { return (m_file_handle != nullptr); }
	void* GetFileHandle() const { return m_file_handle; }
	u8* GetBase() const { return m_baseptr; }
	u8* GetEnd() const { return (m_baseptr + m_pages_reserved * __pagesize); }

	// Request the use of the memory at offsetLocation bytes from the start of the reserved memory area
	// offsetLocation must be page-aligned
	void* Alloc(uptr offsetLocation, size_t size) const;
	u8* Alloc(uptr offsetLocation, size_t size) const;

	void* AllocAtAddress(void* address, size_t size) const
	u8* AllocAtAddress(void* address, size_t size) const
	{
		return Alloc(size, (uptr)address - m_baseptr);
		return Alloc(size, static_cast<const u8*>(address) - m_baseptr);
	}

	void Free(void* address, size_t size) const;

@@ -179,12 +183,12 @@ typedef std::shared_ptr<const VirtualMemoryManager> VirtualMemoryManagerPtr;
class VirtualMemoryBumpAllocator
{
	const VirtualMemoryManagerPtr m_allocator;
	std::atomic<uptr> m_baseptr{0};
	const uptr m_endptr = 0;
	std::atomic<u8*> m_baseptr{0};
	const u8* m_endptr = 0;

public:
	VirtualMemoryBumpAllocator(VirtualMemoryManagerPtr allocator, size_t size, uptr offsetLocation);
	void* Alloc(size_t size);
	u8* Alloc(size_t size);
	const VirtualMemoryManagerPtr& GetAllocator() { return m_allocator; }
};

@@ -201,79 +205,31 @@ protected:
	// Where the memory came from (so we can return it)
	VirtualMemoryManagerPtr m_allocator;

	// Default size of the reserve, in bytes. Can be specified when the object is constructed.
	// Is used as the reserve size when Reserve() is called, unless an override is specified
	// in the Reserve parameters.
	size_t m_defsize;

	void* m_baseptr;

	// reserved memory (in pages).
	uptr m_pages_reserved;

	// Records the number of pages committed to memory.
	// (metric for analysis of buffer usage)
	uptr m_pages_commited;

	// Protection mode to be applied to committed blocks.
	PageProtectionMode m_prot_mode;

	// Controls write access to the entire reserve. When true (the default), the reserve
	// operates normally. When set to false, all committed blocks are re-protected with
	// write disabled, and accesses to uncommitted blocks (read or write) will cause a GPF
	// as well.
	bool m_allow_writes;

	// Allows the implementation to decide how much memory it needs to allocate if someone requests the given size
	// Should translate requests of size 0 to m_defsize
	virtual size_t GetSize(size_t requestedSize);
	u8* m_baseptr = nullptr;
	size_t m_size = 0;

public:
	VirtualMemoryReserve(std::string name, size_t size = 0);
	virtual ~VirtualMemoryReserve()
	{
		Release();
	}
	VirtualMemoryReserve(std::string name);
	virtual ~VirtualMemoryReserve();

	// Initialize with the given piece of memory
	// Note: The memory is already allocated, the allocator is for future use to free the region
	// It may be null in which case there is no way to free the memory in a way it will be usable again
	virtual void* Assign(VirtualMemoryManagerPtr allocator, void* baseptr, size_t size);
	void Assign(VirtualMemoryManagerPtr allocator, u8* baseptr, size_t size);

	void* Reserve(VirtualMemoryManagerPtr allocator, uptr baseOffset, size_t size = 0)
	{
		size = GetSize(size);
		void* allocation = allocator->Alloc(baseOffset, size);
		return Assign(std::move(allocator), allocation, size);
	}
	void* Reserve(VirtualMemoryBumpAllocator& allocator, size_t size = 0)
	{
		size = GetSize(size);
		return Assign(allocator.GetAllocator(), allocator.Alloc(size), size);
	}
	u8* BumpAllocate(VirtualMemoryBumpAllocator& allocator, size_t size);

	virtual void Reset();
	virtual void Release();
	virtual bool TryResize(uint newsize);
	virtual bool Commit();

	virtual void ForbidModification();
	virtual void AllowModification();
	void Release();

	bool IsOk() const { return m_baseptr != NULL; }
	const std::string& GetName() const { return m_name; }

	uptr GetReserveSizeInBytes() const { return m_pages_reserved * __pagesize; }
	uptr GetReserveSizeInPages() const { return m_pages_reserved; }
	uint GetCommittedPageCount() const { return m_pages_commited; }
	uint GetCommittedBytes() const { return m_pages_commited * __pagesize; }
	u8* GetPtr() { return m_baseptr; }
	const u8* GetPtr() const { return m_baseptr; }
	u8* GetPtrEnd() { return m_baseptr + m_size; }
	const u8* GetPtrEnd() const { return m_baseptr + m_size; }

	u8* GetPtr() { return (u8*)m_baseptr; }
	const u8* GetPtr() const { return (u8*)m_baseptr; }
	u8* GetPtrEnd() { return (u8*)m_baseptr + (m_pages_reserved * __pagesize); }
	const u8* GetPtrEnd() const { return (u8*)m_baseptr + (m_pages_reserved * __pagesize); }

	VirtualMemoryReserve& SetPageAccessOnCommit(const PageProtectionMode& mode);
	size_t GetSize() const { return m_size; }

	operator void*() { return m_baseptr; }
	operator const void*() const { return m_baseptr; }

@@ -283,18 +239,15 @@ public:

	u8& operator[](uint idx)
	{
		pxAssert(idx < (m_pages_reserved * __pagesize));
		pxAssert(idx < m_size);
		return *((u8*)m_baseptr + idx);
	}

	const u8& operator[](uint idx) const
	{
		pxAssert(idx < (m_pages_reserved * __pagesize));
		pxAssert(idx < m_size);
		return *((u8*)m_baseptr + idx);
	}

protected:
	virtual void ReprotectCommittedBlocks(const PageProtectionMode& newmode);
};

#ifdef __POSIX__
@@ -30,6 +30,7 @@
#endif

#include "Pcsx2Types.h"
#include <cstddef>

#include "common/emitter/x86_intrin.h"

@@ -80,7 +81,9 @@
// Defines the memory page size for the target platform at compilation. All supported platforms
// (which means Intel only right now) have a 4k granularity.
#define PCSX2_PAGESIZE 0x1000
static const int __pagesize = PCSX2_PAGESIZE;
static constexpr int __pagesize = PCSX2_PAGESIZE;
static constexpr int __pageshift = 12;
static constexpr size_t __pagemask = PCSX2_PAGESIZE - 1;

// --------------------------------------------------------------------------------------
// Microsoft Visual Studio
@@ -13,6 +13,7 @@
 * If not, see <http://www.gnu.org/licenses/>.
 */

#include "common/Align.h"
#include "common/PageFaultSource.h"
#include "common/EventSource.inl"
#include "common/MemsetFast.inl"

@@ -68,17 +69,13 @@ void SrcType_PageFault::_DispatchRaw(ListenerIterator iter, const ListenerIterat
	} while ((++iter != iend) && !m_handled);
}

static size_t pageAlign(size_t size)
{
	return (size + __pagesize - 1) / __pagesize * __pagesize;
}

// --------------------------------------------------------------------------------------
// VirtualMemoryManager (implementations)
// --------------------------------------------------------------------------------------

VirtualMemoryManager::VirtualMemoryManager(std::string name, uptr base, size_t size, uptr upper_bounds, bool strict)
VirtualMemoryManager::VirtualMemoryManager(std::string name, const char* file_mapping_name, uptr base, size_t size, uptr upper_bounds, bool strict)
	: m_name(std::move(name))
	, m_file_handle(nullptr)
	, m_baseptr(0)
	, m_pageuse(nullptr)
	, m_pages_reserved(0)

@@ -86,34 +83,72 @@ VirtualMemoryManager::VirtualMemoryManager(std::string name, uptr base, size_t s
	if (!size)
		return;

	uptr reserved_bytes = pageAlign(size);
	size_t reserved_bytes = Common::PageAlign(size);
	m_pages_reserved = reserved_bytes / __pagesize;

	m_baseptr = (uptr)HostSys::MmapReserve(base, reserved_bytes);

	if (!m_baseptr || (upper_bounds != 0 && (((uptr)m_baseptr + reserved_bytes) > upper_bounds)))
	if (file_mapping_name && file_mapping_name[0])
	{
		DevCon.Warning("%s: host memory @ 0x%016" PRIXPTR " -> 0x%016" PRIXPTR " is unavailable; attempting to map elsewhere...",
			m_name.c_str(), base, base + size);
		std::string real_file_mapping_name(HostSys::GetFileMappingName(file_mapping_name));
		m_file_handle = HostSys::CreateSharedMemory(real_file_mapping_name.c_str(), reserved_bytes);
		if (!m_file_handle)
			return;

		SafeSysMunmap(m_baseptr, reserved_bytes);

		if (base)
		m_baseptr = static_cast<u8*>(HostSys::MapSharedMemory(m_file_handle, 0, (void*)base, reserved_bytes, PageAccess_ReadWrite()));
		if (!m_baseptr || (upper_bounds != 0 && (((uptr)m_baseptr + reserved_bytes) > upper_bounds)))
		{
			// Let's try again at an OS-picked memory area, and then hope it meets needed
			// boundschecking criteria below.
			m_baseptr = (uptr)HostSys::MmapReserve(0, reserved_bytes);
			DevCon.Warning("%s: host memory @ 0x%016" PRIXPTR " -> 0x%016" PRIXPTR " is unavailable; attempting to map elsewhere...",
				m_name.c_str(), base, base + size);

			SafeSysMunmap(m_baseptr, reserved_bytes);

			if (base)
			{
				// Let's try again at an OS-picked memory area, and then hope it meets needed
				// boundschecking criteria below.
				m_baseptr = static_cast<u8*>(HostSys::MapSharedMemory(m_file_handle, 0, nullptr, reserved_bytes, PageAccess_ReadWrite()));
			}
		}
	}
	else
	{
		m_baseptr = static_cast<u8*>(HostSys::Mmap((void*)base, reserved_bytes, PageAccess_Any()));

		if (!m_baseptr || (upper_bounds != 0 && (((uptr)m_baseptr + reserved_bytes) > upper_bounds)))
		{
			DevCon.Warning("%s: host memory @ 0x%016" PRIXPTR " -> 0x%016" PRIXPTR " is unavailable; attempting to map elsewhere...",
				m_name.c_str(), base, base + size);

			SafeSysMunmap(m_baseptr, reserved_bytes);

			if (base)
			{
				// Let's try again at an OS-picked memory area, and then hope it meets needed
				// boundschecking criteria below.
				m_baseptr = static_cast<u8*>(HostSys::Mmap(0, reserved_bytes, PageAccess_Any()));
			}
		}
	}

	bool fulfillsRequirements = true;
	if (strict && m_baseptr != base)
	if (strict && (uptr)m_baseptr != base)
		fulfillsRequirements = false;
	if ((upper_bounds != 0) && ((m_baseptr + reserved_bytes) > upper_bounds))
	if ((upper_bounds != 0) && ((uptr)(m_baseptr + reserved_bytes) > upper_bounds))
		fulfillsRequirements = false;
	if (!fulfillsRequirements)
	{
		SafeSysMunmap(m_baseptr, reserved_bytes);
		if (m_file_handle)
		{
			if (m_baseptr)
				HostSys::UnmapSharedMemory(m_baseptr, reserved_bytes);
			m_baseptr = 0;

			HostSys::DestroySharedMemory(m_file_handle);
			m_file_handle = nullptr;
		}
		else
		{
			SafeSysMunmap(m_baseptr, reserved_bytes);
		}
	}

	if (!m_baseptr)

@@ -137,7 +172,14 @@ VirtualMemoryManager::~VirtualMemoryManager()
	if (m_pageuse)
		delete[] m_pageuse;
	if (m_baseptr)
		HostSys::Munmap(m_baseptr, m_pages_reserved * __pagesize);
	{
		if (m_file_handle)
			HostSys::UnmapSharedMemory((void*)m_baseptr, m_pages_reserved * __pagesize);
		else
			HostSys::Munmap(m_baseptr, m_pages_reserved * __pagesize);
	}
	if (m_file_handle)
		HostSys::DestroySharedMemory(m_file_handle);
}

static bool VMMMarkPagesAsInUse(std::atomic<bool>* begin, std::atomic<bool>* end)

@@ -164,9 +206,9 @@ static bool VMMMarkPagesAsInUse(std::atomic<bool>* begin, std::atomic<bool>* end
	return true;
}

void* VirtualMemoryManager::Alloc(uptr offsetLocation, size_t size) const
u8* VirtualMemoryManager::Alloc(uptr offsetLocation, size_t size) const
{
	size = pageAlign(size);
	size = Common::PageAlign(size);
	if (!pxAssertDev(offsetLocation % __pagesize == 0, "(VirtualMemoryManager) alloc at unaligned offsetLocation"))
		return nullptr;
	if (!pxAssertDev(size + offsetLocation <= m_pages_reserved * __pagesize, "(VirtualMemoryManager) alloc outside reserved area"))

@@ -177,15 +219,15 @@ void* VirtualMemoryManager::Alloc(uptr offsetLocation, size_t size) const
	auto puEnd = &m_pageuse[(offsetLocation + size) / __pagesize];
	if (!pxAssertDev(VMMMarkPagesAsInUse(puStart, puEnd), "(VirtualMemoryManager) allocation requests overlapped"))
		return nullptr;
	return (void*)(m_baseptr + offsetLocation);
	return m_baseptr + offsetLocation;
}

void VirtualMemoryManager::Free(void* address, size_t size) const
{
	uptr offsetLocation = (uptr)address - m_baseptr;
	uptr offsetLocation = (uptr)address - (uptr)m_baseptr;
	if (!pxAssertDev(offsetLocation % __pagesize == 0, "(VirtualMemoryManager) free at unaligned address"))
	{
		uptr newLoc = pageAlign(offsetLocation);
		uptr newLoc = Common::PageAlign(offsetLocation);
		size -= (offsetLocation - newLoc);
		offsetLocation = newLoc;
	}

@@ -210,55 +252,39 @@ void VirtualMemoryManager::Free(void* address, size_t size) const
// --------------------------------------------------------------------------------------
VirtualMemoryBumpAllocator::VirtualMemoryBumpAllocator(VirtualMemoryManagerPtr allocator, uptr offsetLocation, size_t size)
	: m_allocator(std::move(allocator))
	, m_baseptr((uptr)m_allocator->Alloc(offsetLocation, size))
	, m_baseptr(m_allocator->Alloc(offsetLocation, size))
	, m_endptr(m_baseptr + size)
{
	if (m_baseptr.load() == 0)
		pxAssertDev(0, "(VirtualMemoryBumpAllocator) tried to construct from bad VirtualMemoryManager");
}

void* VirtualMemoryBumpAllocator::Alloc(size_t size)
u8* VirtualMemoryBumpAllocator::Alloc(size_t size)
{
	if (m_baseptr.load() == 0) // True if constructed from bad VirtualMemoryManager (assertion was on initialization)
		return nullptr;

	size_t reservedSize = pageAlign(size);
	size_t reservedSize = Common::PageAlign(size);

	uptr out = m_baseptr.fetch_add(reservedSize, std::memory_order_relaxed);
	u8* out = m_baseptr.fetch_add(reservedSize, std::memory_order_relaxed);

	if (!pxAssertDev(out - reservedSize + size <= m_endptr, "(VirtualMemoryBumpAllocator) ran out of memory"))
		return nullptr;

	return (void*)out;
	return out;
}

// --------------------------------------------------------------------------------------
// VirtualMemoryReserve (implementations)
// --------------------------------------------------------------------------------------
VirtualMemoryReserve::VirtualMemoryReserve(std::string name, size_t size)
VirtualMemoryReserve::VirtualMemoryReserve(std::string name)
	: m_name(std::move(name))
{
	m_defsize = size;

	m_allocator = nullptr;
	m_pages_commited = 0;
	m_pages_reserved = 0;
	m_baseptr = nullptr;
	m_prot_mode = PageAccess_None();
	m_allow_writes = true;
}

VirtualMemoryReserve& VirtualMemoryReserve::SetPageAccessOnCommit(const PageProtectionMode& mode)
VirtualMemoryReserve::~VirtualMemoryReserve()
{
	m_prot_mode = mode;
	return *this;
}

size_t VirtualMemoryReserve::GetSize(size_t requestedSize)
{
	if (!requestedSize)
		return pageAlign(m_defsize);
	return pageAlign(requestedSize);
	pxAssertRel(!m_baseptr, "VirtualMemoryReserve has not been released.");
}

// Notes:

@@ -269,136 +295,43 @@ size_t VirtualMemoryReserve::GetSize(size_t requestedSize)
// baseptr - the new base pointer that's about to be assigned
// size - size of the region pointed to by baseptr
//
void* VirtualMemoryReserve::Assign(VirtualMemoryManagerPtr allocator, void* baseptr, size_t size)
void VirtualMemoryReserve::Assign(VirtualMemoryManagerPtr allocator, u8* baseptr, size_t size)
{
	if (!pxAssertDev(m_baseptr == NULL, "(VirtualMemoryReserve) Invalid object state; object has already been reserved."))
		return m_baseptr;

	if (!size)
		return nullptr;
	pxAssertRel(size > 0 && Common::IsAlignedPow2(size, __pagesize), "VM allocation is not page aligned");
	pxAssertRel(!m_baseptr, "Virtual memory reserve has already been assigned");

	m_allocator = std::move(allocator);

	m_baseptr = baseptr;

	uptr reserved_bytes = pageAlign(size);
	m_pages_reserved = reserved_bytes / __pagesize;

	if (!m_baseptr)
		return nullptr;
	m_size = size;

	std::string mbkb;
	uint mbytes = reserved_bytes / _1mb;
	uint mbytes = size / _1mb;
	if (mbytes)
		mbkb = fmt::format("[{}mb]", mbytes);
	else
		mbkb = fmt::format("[{}kb]", reserved_bytes / 1024);
		mbkb = fmt::format("[{}kb]", size / 1024);

	DevCon.WriteLn(Color_Gray, "%-32s @ 0x%016" PRIXPTR " -> 0x%016" PRIXPTR " %s", m_name.c_str(),
		m_baseptr, (uptr)m_baseptr + reserved_bytes, mbkb.c_str());

	return m_baseptr;
		m_baseptr, (uptr)m_baseptr + size, mbkb.c_str());
}

void VirtualMemoryReserve::ReprotectCommittedBlocks(const PageProtectionMode& newmode)
u8* VirtualMemoryReserve::BumpAllocate(VirtualMemoryBumpAllocator& allocator, size_t size)
{
	if (!m_pages_commited)
		return;
	HostSys::MemProtect(m_baseptr, m_pages_commited * __pagesize, newmode);
}
	u8* base = allocator.Alloc(size);
	if (base)
		Assign(allocator.GetAllocator(), base, size);

// Clears all committed blocks, restoring the allocation to a reserve only.
void VirtualMemoryReserve::Reset()
{
	if (!m_pages_commited)
		return;

	ReprotectCommittedBlocks(PageAccess_None());
	HostSys::MmapResetPtr(m_baseptr, m_pages_commited * __pagesize);
	m_pages_commited = 0;
	return base;
}

void VirtualMemoryReserve::Release()
{
	if (!m_baseptr)
		return;
	Reset();
	m_allocator->Free(m_baseptr, m_pages_reserved * __pagesize);

	m_allocator->Free(m_baseptr, m_size);
	m_baseptr = nullptr;
}

bool VirtualMemoryReserve::Commit()
{
	if (!m_pages_reserved)
		return false;
	if (!pxAssert(!m_pages_commited))
		return true;

	m_pages_commited = m_pages_reserved;
	return HostSys::MmapCommitPtr(m_baseptr, m_pages_reserved * __pagesize, m_prot_mode);
}

void VirtualMemoryReserve::AllowModification()
{
	m_allow_writes = true;
	HostSys::MemProtect(m_baseptr, m_pages_commited * __pagesize, m_prot_mode);
}

void VirtualMemoryReserve::ForbidModification()
{
	m_allow_writes = false;
	HostSys::MemProtect(m_baseptr, m_pages_commited * __pagesize, PageProtectionMode(m_prot_mode).Write(false));
}

// If growing the array, or if shrinking the array to some point that's still *greater* than the
// committed memory range, then attempt a passive "on-the-fly" resize that maps/unmaps some portion
// of the reserve.
//
// If the above conditions are not met, or if the map/unmap fails, this method returns false.
// The caller will be responsible for manually resetting the reserve.
//
// Parameters:
// newsize - new size of the reserved buffer, in bytes.
bool VirtualMemoryReserve::TryResize(uint newsize)
{
	uint newPages = pageAlign(newsize) / __pagesize;

	if (newPages > m_pages_reserved)
	{
		uint toReservePages = newPages - m_pages_reserved;
		uint toReserveBytes = toReservePages * __pagesize;

		DevCon.WriteLn("%-32s is being expanded by %u pages.", m_name.c_str(), toReservePages);

		if (!m_allocator->AllocAtAddress(GetPtrEnd(), toReserveBytes))
		{
			Console.Warning("%-32s could not be passively resized due to virtual memory conflict!", m_name.c_str());
			Console.Indent().Warning("(attempted to map memory @ %08p -> %08p)", m_baseptr, (uptr)m_baseptr + toReserveBytes);
			return false;
		}

		DevCon.WriteLn(Color_Gray, "%-32s @ %08p -> %08p [%umb]", m_name.c_str(),
			m_baseptr, (uptr)m_baseptr + toReserveBytes, toReserveBytes / _1mb);
	}
	else if (newPages < m_pages_reserved)
	{
		if (m_pages_commited > newsize)
			return false;

		uint toRemovePages = m_pages_reserved - newPages;
		uint toRemoveBytes = toRemovePages * __pagesize;

		DevCon.WriteLn("%-32s is being shrunk by %u pages.", m_name.c_str(), toRemovePages);

		m_allocator->Free(GetPtrEnd() - toRemoveBytes, toRemoveBytes);

		DevCon.WriteLn(Color_Gray, "%-32s @ %08p -> %08p [%umb]", m_name.c_str(),
			m_baseptr, GetPtrEnd(), GetReserveSizeInBytes() / _1mb);
	}

	m_pages_reserved = newPages;
	return true;
	m_size = 0;
}

// --------------------------------------------------------------------------------------

@@ -422,11 +355,3 @@ std::string PageProtectionMode::ToString() const

	return modeStr;
}

// --------------------------------------------------------------------------------------
// Common HostSys implementation
// --------------------------------------------------------------------------------------
void HostSys::Munmap(void* base, size_t size)
{
	Munmap((uptr)base, size);
}
@@ -15,12 +15,14 @@

#if defined(_WIN32)

#include "common/Align.h"
#include "common/RedtapeWindows.h"
#include "common/PageFaultSource.h"
#include "common/Console.h"
#include "common/Exceptions.h"
#include "common/StringUtil.h"
#include "common/AlignedMalloc.h"
#include "fmt/core.h"

static long DoSysPageFaultExceptionFilter(EXCEPTION_POINTERS* eps)
{

@@ -81,64 +83,19 @@ static DWORD ConvertToWinApi(const PageProtectionMode& mode)
	return winmode;
}

void* HostSys::MmapReservePtr(void* base, size_t size)
void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
	return VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
	if (mode.IsNone())
		return nullptr;

	return VirtualAlloc(base, size, MEM_RESERVE | MEM_COMMIT, ConvertToWinApi(mode));
}

bool HostSys::MmapCommitPtr(void* base, size_t size, const PageProtectionMode& mode)
{
	void* result = VirtualAlloc(base, size, MEM_COMMIT, ConvertToWinApi(mode));
	if (result)
		return true;

	const DWORD errcode = GetLastError();
	if (errcode == ERROR_COMMITMENT_MINIMUM)
	{
		Console.Warning("(MmapCommit) Received windows error %u {Virtual Memory Minimum Too Low}.", ERROR_COMMITMENT_MINIMUM);
		Sleep(1000); // Cut windows some time to rework its memory...
	}
	else if (errcode != ERROR_NOT_ENOUGH_MEMORY && errcode != ERROR_OUTOFMEMORY)
	{
		pxFailDev(("VirtualAlloc COMMIT failed: " + Exception::WinApiError().GetMsgFromWindows()).c_str());
		return false;
	}

	return false;
}

void HostSys::MmapResetPtr(void* base, size_t size)
{
	VirtualFree(base, size, MEM_DECOMMIT);
}

void* HostSys::MmapReserve(uptr base, size_t size)
{
	return MmapReservePtr((void*)base, size);
}

bool HostSys::MmapCommit(uptr base, size_t size, const PageProtectionMode& mode)
{
	return MmapCommitPtr((void*)base, size, mode);
}

void HostSys::MmapReset(uptr base, size_t size)
{
	MmapResetPtr((void*)base, size);
}

void* HostSys::Mmap(uptr base, size_t size)
{
	return VirtualAlloc((void*)base, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
}

void HostSys::Munmap(uptr base, size_t size)
void HostSys::Munmap(void* base, size_t size)
{
	if (!base)
		return;
	//VirtualFree((void*)base, size, MEM_DECOMMIT);

	VirtualFree((void*)base, 0, MEM_RELEASE);
}

@@ -148,14 +105,47 @@ void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode&

	DWORD OldProtect; // enjoy my uselessness, yo!
	if (!VirtualProtect(baseaddr, size, ConvertToWinApi(mode), &OldProtect))
	{
		Exception::WinApiError apiError;

		apiError.SetDiagMsg(
			StringUtil::StdStringFromFormat("VirtualProtect failed @ 0x%08X -> 0x%08X (mode=%s)",
				baseaddr, (uptr)baseaddr + size, mode.ToString().c_str()));

		pxFailDev(apiError.FormatDiagnosticMessage().c_str());
	}
		pxFail("VirtualProtect() failed");
}

std::string HostSys::GetFileMappingName(const char* prefix)
{
	const unsigned pid = GetCurrentProcessId();
	return fmt::format("{}_{}", prefix, pid);
}

void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
	return static_cast<void*>(CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
		static_cast<DWORD>(size >> 32), static_cast<DWORD>(size), StringUtil::UTF8StringToWideString(name).c_str()));
}

void HostSys::DestroySharedMemory(void* ptr)
{
	CloseHandle(static_cast<HANDLE>(ptr));
}

void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	void* ret = MapViewOfFileEx(static_cast<HANDLE>(handle), FILE_MAP_READ | FILE_MAP_WRITE,
		static_cast<DWORD>(offset >> 32), static_cast<DWORD>(offset), size, baseaddr);
	if (!ret)
		return nullptr;

	const DWORD prot = ConvertToWinApi(mode);
	if (prot != PAGE_READWRITE)
	{
		DWORD old_prot;
		if (!VirtualProtect(ret, size, prot, &old_prot))
			pxFail("Failed to protect memory mapping");
	}
	return ret;
}

void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
	if (!UnmapViewOfFile(baseaddr))
		pxFail("Failed to unmap shared memory");
}

#endif
@@ -290,13 +290,9 @@ u32 readCache32(u32 mem)
	return readCache<u32>(mem);
}

RETURNS_R64 readCache64(u32 mem)
u64 readCache64(u32 mem)
{
	int way, idx;
	void* addr = prepareCacheAccess<false, sizeof(u64)>(mem, &way, &idx);
	r64 value = r64_load(addr);
	CACHE_LOG("readCache64 %8.8x from %d, way %d, value %llx", mem, idx, way, *(u64*)&value);
	return value;
	return readCache<u64>(mem);
}

RETURNS_R128 readCache128(u32 mem)

@@ -27,5 +27,5 @@ void writeCache128(u32 mem, const mem128_t* value);
u8 readCache8(u32 mem);
u16 readCache16(u32 mem);
u32 readCache32(u32 mem);
RETURNS_R64 readCache64(u32 mem);
u64 readCache64(u32 mem);
RETURNS_R128 readCache128(u32 mem);
@@ -294,9 +294,7 @@ u64 R5900DebugInterface::read64(u32 address)
	if (!isValidAddress(address) || address % 8)
		return -1;

	u64 result;
	memRead64(address, result);
	return result;
	return memRead64(address);
}

u128 R5900DebugInterface::read128(u32 address)
pcsx2/GS.cpp
@@ -215,30 +215,29 @@ __fi void gsWrite32(u32 mem, u32 value)
//////////////////////////////////////////////////////////////////////////
// GS Write 64 bit

void gsWrite64_generic( u32 mem, const mem64_t* value )
void gsWrite64_generic( u32 mem, u64 value )
{
	const u32* const srcval32 = (u32*)value;
	GIF_LOG("GS Write64 at %8.8lx with data %8.8x_%8.8x", mem, srcval32[1], srcval32[0]);
	GIF_LOG("GS Write64 at %8.8lx with data %8.8x_%8.8x", mem, (u32)(value >> 32), (u32)value);

	*(u64*)PS2GS_BASE(mem) = *value;
	std::memcpy(PS2GS_BASE(mem), &value, sizeof(value));
}

void gsWrite64_page_00( u32 mem, const mem64_t* value )
void gsWrite64_page_00( u32 mem, u64 value )
{
	s_GSRegistersWritten |= (mem == GS_DISPFB1 || mem == GS_DISPFB2 || mem == GS_PMODE);

	gsWrite64_generic( mem, value );
}

void gsWrite64_page_01( u32 mem, const mem64_t* value )
void gsWrite64_page_01( u32 mem, u64 value )
{
	GIF_LOG("GS Write64 at %8.8lx with data %8.8x_%8.8x", mem, (u32*)value[1], (u32*)value[0]);
	GIF_LOG("GS Write64 at %8.8lx with data %8.8x_%8.8x", mem, (u32)(value >> 32), (u32)value);

	switch( mem )
	{
		case GS_BUSDIR:

			gifUnit.stat.DIR = value[0] & 1;
			gifUnit.stat.DIR = static_cast<u32>(value) & 1;
			if (gifUnit.stat.DIR) { // Assume will do local->host transfer
				gifUnit.stat.OPH = true; // Should we set OPH here?
				gifUnit.FlushToMTGS(); // Send any pending GS Primitives to the GS

@@ -252,11 +251,11 @@ void gsWrite64_page_01( u32 mem, const mem64_t* value )
			return;

		case GS_CSR:
			gsCSRwrite(tGS_CSR(*value));
			gsCSRwrite(tGS_CSR(value));
			return;

		case GS_IMR:
			IMRwrite((u32)value[0]);
			IMRwrite(static_cast<u32>(value));
			return;
	}

@@ -266,35 +265,34 @@ void gsWrite64_page_01( u32 mem, const mem64_t* value )
//////////////////////////////////////////////////////////////////////////
// GS Write 128 bit

void gsWrite128_page_00( u32 mem, const mem128_t* value )
void TAKES_R128 gsWrite128_page_00( u32 mem, r128 value )
{
	gsWrite128_generic( mem, value );
}

void gsWrite128_page_01( u32 mem, const mem128_t* value )
void TAKES_R128 gsWrite128_page_01( u32 mem, r128 value )
{
	switch( mem )
	{
		case GS_CSR:
			gsCSRwrite((u32)value[0]);
			gsCSRwrite(r128_to_u32(value));
			return;

		case GS_IMR:
			IMRwrite((u32)value[0]);
			IMRwrite(r128_to_u32(value));
			return;
	}

	gsWrite128_generic( mem, value );
}

void gsWrite128_generic( u32 mem, const mem128_t* value )
void TAKES_R128 gsWrite128_generic( u32 mem, r128 value )
{
	const u32* const srcval32 = (u32*)value;

	alignas(16) const u128 uvalue = r128_to_u128(value);
	GIF_LOG("GS Write128 at %8.8lx with data %8.8x_%8.8x_%8.8x_%8.8x", mem,
		srcval32[3], srcval32[2], srcval32[1], srcval32[0]);
		uvalue._u32[3], uvalue._u32[2], uvalue._u32[1], uvalue._u32[0]);

	CopyQWC(PS2GS_BASE(mem), value);
	r128_store(PS2GS_BASE(mem), value);
}

__fi u8 gsRead8(u32 mem)
pcsx2/GS.h
@@ -18,6 +18,7 @@
#include "Common.h"
#include "Gif.h"
#include "GS/GS.h"
#include "SingleRegisterTypes.h"
#include <atomic>
#include <functional>
#include <mutex>

@@ -451,13 +452,13 @@ extern void gsWrite8(u32 mem, u8 value);
extern void gsWrite16(u32 mem, u16 value);
extern void gsWrite32(u32 mem, u32 value);

extern void gsWrite64_page_00(u32 mem, const mem64_t* value);
extern void gsWrite64_page_01(u32 mem, const mem64_t* value);
extern void gsWrite64_generic(u32 mem, const mem64_t* value);
extern void gsWrite64_page_00(u32 mem, u64 value);
extern void gsWrite64_page_01(u32 mem, u64 value);
extern void gsWrite64_generic(u32 mem, u64 value);

extern void gsWrite128_page_00(u32 mem, const mem128_t* value);
extern void gsWrite128_page_01(u32 mem, const mem128_t* value);
extern void gsWrite128_generic(u32 mem, const mem128_t* value);
extern void TAKES_R128 gsWrite128_page_00(u32 mem, r128 value);
extern void TAKES_R128 gsWrite128_page_01(u32 mem, r128 value);
extern void TAKES_R128 gsWrite128_generic(u32 mem, r128 value);

extern u8 gsRead8(u32 mem);
extern u16 gsRead16(u32 mem);
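The gsWrite64/gsWrite128 signature changes above are the "passed by register/value" part of the commit message: a u64 travels in a general-purpose register, and an r128 value annotated with TAKES_R128 (which presumably expands to a calling-convention attribute such as __vectorcall on MSVC; that expansion is an assumption, not something shown in this diff) travels in an XMM register rather than being spilled to memory and passed by pointer. A small self-contained illustration using plain __m128i instead of PCSX2's types:

	#include <emmintrin.h>
	#include <cstdint>
	#include <cstring>

	static uint8_t gs_regs[0x2000]; // stand-in for the GS register block

	// Old style: the caller must place the value in memory and pass its address.
	static void write128_by_ref(uint32_t off, const __m128i* value)
	{
		std::memcpy(&gs_regs[off], value, sizeof(*value));
	}

	// New style: the value arrives in an XMM register (SysV x86-64; __vectorcall
	// on MSVC), so no caller-side spill is needed just to form the argument.
	static void write128_by_val(uint32_t off, __m128i value)
	{
		_mm_storeu_si128(reinterpret_cast<__m128i*>(&gs_regs[off]), value);
	}

	int main()
	{
		const __m128i v = _mm_set1_epi32(0x12345678);
		write128_by_ref(0x10, &v);
		write128_by_val(0x20, v);
		return 0;
	}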
@@ -50,8 +50,6 @@ static void GSDumpReplayerCpuCheckExecutionState();
static void GSDumpReplayerCpuThrowException(const BaseException& ex);
static void GSDumpReplayerCpuThrowCpuException(const BaseR5900Exception& ex);
static void GSDumpReplayerCpuClear(u32 addr, u32 size);
static uint GSDumpReplayerCpuGetCacheReserve();
static void GSDumpReplayerCpuSetCacheReserve(uint reserveInMegs);

static std::unique_ptr<GSDumpFile> s_dump_file;
static u32 s_current_packet = 0;

@@ -70,9 +68,7 @@ R5900cpu GSDumpReplayerCpu = {
	GSDumpReplayerCpuCheckExecutionState,
	GSDumpReplayerCpuThrowException,
	GSDumpReplayerCpuThrowCpuException,
	GSDumpReplayerCpuClear,
	GSDumpReplayerCpuGetCacheReserve,
	GSDumpReplayerCpuSetCacheReserve};
	GSDumpReplayerCpuClear};

static InterpVU0 gsDumpVU0;
static InterpVU1 gsDumpVU1;

@@ -320,15 +316,6 @@ void GSDumpReplayerCpuClear(u32 addr, u32 size)
{
}

uint GSDumpReplayerCpuGetCacheReserve()
{
	return 0;
}

void GSDumpReplayerCpuSetCacheReserve(uint reserveInMegs)
{
}

void GSDumpReplayer::RenderUI()
{
	const float scale = ImGuiManager::GetGlobalScale();
@@ -266,7 +266,7 @@ mem16_t hwRead16_page_0F_INTC_HACK(u32 mem)
}

template< uint page >
static RETURNS_R64 _hwRead64(u32 mem)
static u64 _hwRead64(u32 mem)
{
	pxAssume( (mem & 0x07) == 0 );

@@ -289,7 +289,7 @@ static RETURNS_R64 _hwRead64(u32 mem)

			uint wordpart = (mem >> 3) & 0x1;
			r128 full = _hwRead128<page>(mem & ~0x0f);
			return r64_load(reinterpret_cast<u64*>(&full) + wordpart);
			return *(reinterpret_cast<u64*>(&full) + wordpart);
		}
		case 0x0F:
			if ((mem & 0xffffff00) == 0x1000f300)

@@ -302,20 +302,20 @@ static RETURNS_R64 _hwRead64(u32 mem)
					u32 lo = psHu32(0x1000f3E0);
					ReadFifoSingleWord();
					u32 hi = psHu32(0x1000f3E0);
					return r64_from_u32x2(lo, hi);
					return static_cast<u64>(lo) | (static_cast<u64>(hi) << 32);
				}
			}
		default: break;
	}

	return r64_from_u32(_hwRead32<page, false>(mem));
	return static_cast<u64>(_hwRead32<page, false>(mem));
}

template< uint page >
RETURNS_R64 hwRead64(u32 mem)
mem64_t hwRead64(u32 mem)
{
	r64 res = _hwRead64<page>(mem);
	eeHwTraceLog(mem, *(u64*)&res, true);
	u64 res = _hwRead64<page>(mem);
	eeHwTraceLog(mem, res, true);
	return res;
}

@@ -379,7 +379,7 @@ RETURNS_R128 _hwRead128(u32 mem)
			break;

		default:
			return r128_from_r64_clean(_hwRead64<page>(mem));
			return r128_from_u64_dup(_hwRead64<page>(mem));
	}
	return r128_load(&result);
}

@@ -388,7 +388,7 @@ template< uint page >
RETURNS_R128 hwRead128(u32 mem)
{
	r128 res = _hwRead128<page>(mem);
	eeHwTraceLog(mem, *(mem128_t*)&res, true);
	eeHwTraceLog(mem, res, true);
	return res;
}

@@ -396,7 +396,7 @@ RETURNS_R128 hwRead128(u32 mem)
	template mem8_t hwRead8<pageidx>(u32 mem); \
	template mem16_t hwRead16<pageidx>(u32 mem); \
	template mem32_t hwRead32<pageidx>(u32 mem); \
	template RETURNS_R64 hwRead64<pageidx>(u32 mem); \
	template mem64_t hwRead64<pageidx>(u32 mem); \
	template RETURNS_R128 hwRead128<pageidx>(u32 mem); \
	template mem32_t _hwRead32<pageidx, false>(u32 mem);
@@ -40,7 +40,7 @@ using namespace R5900;

template< uint page > void _hwWrite8(u32 mem, u8 value);
template< uint page > void _hwWrite16(u32 mem, u8 value);
template< uint page > void _hwWrite128(u32 mem, u8 value);
template< uint page > void TAKES_R128 _hwWrite128(u32 mem, r128 value);


template<uint page>

@@ -80,7 +80,7 @@ void _hwWrite32( u32 mem, u32 value )
		u128 zerofill = u128::From32(0);
		zerofill._u32[(mem >> 2) & 0x03] = value;

		_hwWrite128<page>(mem & ~0x0f, &zerofill);
		_hwWrite128<page>(mem & ~0x0f, r128_from_u128(zerofill));
	}
	return;

@@ -373,7 +373,7 @@ void hwWrite16(u32 mem, u16 value)
}

template<uint page>
void _hwWrite64( u32 mem, const mem64_t* srcval )
void _hwWrite64( u32 mem, u64 value )
{
	pxAssume( (mem & 0x07) == 0 );

@@ -387,7 +387,7 @@ void _hwWrite64( u32 mem, const mem64_t* srcval )
	switch (page)
	{
		case 0x02:
			if (!ipuWrite64(mem, *srcval)) return;
			if (!ipuWrite64(mem, value)) return;
			break;

		case 0x04:

@@ -396,30 +396,30 @@ void _hwWrite64( u32 mem, const mem64_t* srcval )
		case 0x07:
		{
			u128 zerofill = u128::From32(0);
			zerofill._u64[(mem >> 3) & 0x01] = *srcval;
			hwWrite128<page>(mem & ~0x0f, &zerofill);
			zerofill._u64[(mem >> 3) & 0x01] = value;
			hwWrite128<page>(mem & ~0x0f, r128_from_u128(zerofill));
		}
		return;

		default:
			// disregard everything except the lower 32 bits.
			// ... and skip the 64 bit writeback since the 32-bit one will suffice.
			hwWrite32<page>( mem, ((u32*)srcval)[0] );
			hwWrite32<page>( mem, value );
			return;
	}

	psHu64(mem) = *srcval;
	std::memcpy(&eeHw[(mem) & 0xffff], &value, sizeof(value));
}

template<uint page>
void hwWrite64( u32 mem, const mem64_t* srcval )
void hwWrite64( u32 mem, mem64_t value )
{
	eeHwTraceLog( mem, *srcval, false );
	_hwWrite64<page>(mem, srcval);
	eeHwTraceLog( mem, value, false );
	_hwWrite64<page>(mem, value);
}

template< uint page >
void _hwWrite128(u32 mem, const mem128_t* srcval)
void TAKES_R128 _hwWrite128(u32 mem, r128 srcval)
{
	pxAssume( (mem & 0x0f) == 0 );

@@ -429,24 +429,35 @@ void _hwWrite128(u32 mem, const mem128_t* srcval)
#if PSX_EXTRALOGS
	if ((mem & 0x1000ff00) == 0x1000f300) DevCon.Warning("128bit Write to SIF Register %x wibble", mem);
#endif

	switch (page)
	{
		case 0x04:
			WriteFIFO_VIF0(srcval);
		{
			alignas(16) const u128 usrcval = r128_to_u128(srcval);
			WriteFIFO_VIF0(&usrcval);
		}
			return;

		case 0x05:
			WriteFIFO_VIF1(srcval);
		{
			alignas(16) const u128 usrcval = r128_to_u128(srcval);
			WriteFIFO_VIF1(&usrcval);
		}
			return;

		case 0x06:
			WriteFIFO_GIF(srcval);
		{
			alignas(16) const u128 usrcval = r128_to_u128(srcval);
			WriteFIFO_GIF(&usrcval);
		}
			return;

		case 0x07:
			if (mem & 0x10)
			{
				WriteFIFO_IPUin(srcval);
				alignas(16) const u128 usrcval = r128_to_u128(srcval);
				WriteFIFO_IPUin(&usrcval);
			}
			else
			{

@@ -462,7 +473,8 @@ void _hwWrite128(u32 mem, const mem128_t* srcval)
		case 0x0F:
			// todo: psx mode: this is new
			if (((mem & 0x1FFFFFFF) >= EEMemoryMap::SBUS_PS1_Start) && ((mem & 0x1FFFFFFF) < EEMemoryMap::SBUS_PS1_End)) {
				PGIFwQword((mem & 0x1FFFFFFF), (void*)srcval);
				alignas(16) const u128 usrcval = r128_to_u128(srcval);
				PGIFwQword((mem & 0x1FFFFFFF), (void*)&usrcval);
				return;
			}

@@ -470,15 +482,15 @@ void _hwWrite128(u32 mem, const mem128_t* srcval)
	}

	// All upper bits of all non-FIFO 128-bit HW writes are almost certainly disregarded. --air
	hwWrite64<page>(mem, (mem64_t*)srcval);
	hwWrite64<page>(mem, r128_to_u64(srcval));

	//CopyQWC(&psHu128(mem), srcval);
}

template< uint page >
void hwWrite128(u32 mem, const mem128_t* srcval)
void TAKES_R128 hwWrite128(u32 mem, r128 srcval)
{
	eeHwTraceLog( mem, *srcval, false );
	eeHwTraceLog( mem, srcval, false );
	_hwWrite128<page>(mem, srcval);
}

@@ -486,8 +498,8 @@ void hwWrite128(u32 mem, const mem128_t* srcval)
	template void hwWrite8<pageidx>(u32 mem, mem8_t value); \
	template void hwWrite16<pageidx>(u32 mem, mem16_t value); \
	template void hwWrite32<pageidx>(u32 mem, mem32_t value); \
	template void hwWrite64<pageidx>(u32 mem, const mem64_t* srcval); \
	template void hwWrite128<pageidx>(u32 mem, const mem128_t* srcval);
	template void hwWrite64<pageidx>(u32 mem, mem64_t value); \
	template void TAKES_R128 hwWrite128<pageidx>(u32 mem, r128 srcval);

InstantizeHwWrite(0x00); InstantizeHwWrite(0x08);
InstantizeHwWrite(0x01); InstantizeHwWrite(0x09);
@@ -233,7 +233,7 @@ __fi u32 ipuRead32(u32 mem)
	return psHu32(IPU_CMD + mem);
}

__fi RETURNS_R64 ipuRead64(u32 mem)
__fi u64 ipuRead64(u32 mem)
{
	// Note: It's assumed that mem's input value is always in the 0x10002000 page
	// of memory (if not, it's probably bad code).

@@ -253,7 +253,7 @@ __fi RETURNS_R64 ipuRead64(u32 mem)

			if (ipuRegs.cmd.DATA & 0xffffff)
				IPU_LOG("read64: IPU_CMD=BUSY=%x, DATA=%08X", ipuRegs.cmd.BUSY ? 1 : 0, ipuRegs.cmd.DATA);
			return r64_load(&ipuRegs.cmd._u64);
			return ipuRegs.cmd._u64;
		}

		ipucase(IPU_CTRL):

@@ -272,7 +272,7 @@ __fi RETURNS_R64 ipuRead64(u32 mem)
			IPU_LOG("read64: Unknown=%x", mem);
			break;
	}
	return r64_load(&psHu64(IPU_CMD + mem));
	return psHu64(IPU_CMD + mem);
}

void ipuSoftReset()

@@ -295,7 +295,7 @@ extern bool CommandExecuteQueued;
extern void ipuReset();

extern u32 ipuRead32(u32 mem);
extern RETURNS_R64 ipuRead64(u32 mem);
extern u64 ipuRead64(u32 mem);
extern bool ipuWrite32(u32 mem,u32 value);
extern bool ipuWrite64(u32 mem,u64 value);
@@ -615,15 +615,6 @@ static void intThrowException( const BaseException& ex )
	ex.Rethrow();
}

static void intSetCacheReserve( uint reserveInMegs )
{
}

static uint intGetCacheReserve()
{
	return 0;
}

R5900cpu intCpu =
{
	intReserve,

@@ -636,8 +627,5 @@ R5900cpu intCpu =
	intSafeExitExecution,
	intThrowException,
	intThrowException,
	intClear,

	intGetCacheReserve,
	intSetCacheReserve,
	intClear
};
@@ -34,20 +34,34 @@ alignas(__pagesize) u8 iopHw[Ps2MemSize::IopHardware];
 // iopMemoryReserve
 // --------------------------------------------------------------------------------------
 iopMemoryReserve::iopMemoryReserve()
-	: _parent( "IOP Main Memory (2mb)", sizeof(*iopMem) )
+	: _parent("IOP Main Memory (2mb)")
 {
 }

-void iopMemoryReserve::Reserve(VirtualMemoryManagerPtr allocator)
+iopMemoryReserve::~iopMemoryReserve()
 {
-	_parent::Reserve(std::move(allocator), HostMemoryMap::IOPmemOffset);
-	//_parent::Reserve(EmuConfig.HostMap.IOP);
+	Release();
 }

-void iopMemoryReserve::Commit()
+void iopMemoryReserve::Assign(VirtualMemoryManagerPtr allocator)
 {
-	_parent::Commit();
-	iopMem = (IopVM_MemoryAllocMess*)m_reserve.GetPtr();
+	psxMemWLUT = (uptr*)_aligned_malloc(0x2000 * sizeof(uptr) * 2, 16);
+	if (!psxMemWLUT)
+		pxFailRel("Failed to allocate IOP memory lookup table");
+
+	psxMemRLUT = psxMemWLUT + 0x2000; //(uptr*)_aligned_malloc(0x10000 * sizeof(uptr),16);
+
+	VtlbMemoryReserve::Assign(std::move(allocator), HostMemoryMap::IOPmemOffset, sizeof(*iopMem));
+	iopMem = reinterpret_cast<IopVM_MemoryAllocMess*>(GetPtr());
+}
+
+void iopMemoryReserve::Release()
+{
+	_parent::Release();
+
+	safe_aligned_free(psxMemWLUT);
+	psxMemRLUT = nullptr;
+	iopMem = nullptr;
 }

 // Note! Resetting the IOP's memory state is dependent on having *all* psx memory allocated,

@@ -58,12 +72,6 @@ void iopMemoryReserve::Reset()
 	pxAssert( iopMem );

-	if (!psxMemWLUT)
-	{
-		psxMemWLUT = (uptr*)_aligned_malloc(0x2000 * sizeof(uptr) * 2, 16);
-		psxMemRLUT = psxMemWLUT + 0x2000; //(uptr*)_aligned_malloc(0x10000 * sizeof(uptr),16);
-	}
-
 	DbgCon.WriteLn("IOP resetting main memory...");

 	memset(psxMemWLUT, 0, 0x2000 * sizeof(uptr) * 2); // clears both allocations, RLUT and WLUT

@@ -115,16 +123,6 @@ void iopMemoryReserve::Reset()
 	//for (i=0; i<0x0008; i++) psxMemWLUT[i + 0xbfc0] = (uptr)&psR[i << 16];
 }

-void iopMemoryReserve::Decommit()
-{
-	_parent::Decommit();
-
-	safe_aligned_free(psxMemWLUT);
-	psxMemRLUT = NULL;
-	iopMem = NULL;
-}
-
 u8 iopMemRead8(u32 mem)
 {
 	mem &= 0x1fffffff;

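Worth noting in the hunk above: the write and read lookup tables come from a single `_aligned_malloc` of 0x4000 pointers, with the read half simply starting 0x2000 entries in, so one `memset` and one free cover both. A minimal sketch of the same carving pattern (plain C++ stand-ins, not the PCSX2 API):

#include <cstdint>
#include <cstdlib>
#include <cstring>

int main()
{
	constexpr size_t kEntries = 0x2000; // entries per table

	// One allocation holds the write LUT and read LUT back to back.
	uintptr_t* wlut = static_cast<uintptr_t*>(std::malloc(kEntries * sizeof(uintptr_t) * 2));
	if (!wlut)
		return 1;
	uintptr_t* rlut = wlut + kEntries; // read LUT is the upper half

	// Clearing the full 2x span wipes both tables at once, mirroring
	// memset(psxMemWLUT, 0, 0x2000 * sizeof(uptr) * 2) above.
	std::memset(wlut, 0, kEntries * sizeof(uintptr_t) * 2);
	(void)rlut;

	std::free(wlut); // a single free releases both tables
	return 0;
}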
@@ -327,7 +327,7 @@ __fi void VU_Thread::Write(u32 val)
 	m_write_pos += 1;
 }

-__fi void VU_Thread::Write(void* src, u32 size)
+__fi void VU_Thread::Write(const void* src, u32 size)
 {
 	memcpy(GetWritePtr(), src, size);
 	m_write_pos += size_u32(size);

@@ -472,7 +472,7 @@ void VU_Thread::ExecuteVU(u32 vu_addr, u32 vif_top, u32 vif_itop, u32 fbrst)
 	}
 }

-void VU_Thread::VifUnpack(vifStruct& _vif, VIFregisters& _vifRegs, u8* data, u32 size)
+void VU_Thread::VifUnpack(vifStruct& _vif, VIFregisters& _vifRegs, const u8* data, u32 size)
 {
 	MTVU_LOG("MTVU - VifUnpack!");
 	u32 vif_copy_size = (uptr)&_vif.StructEnd - (uptr)&_vif.tag;

@@ -486,7 +486,7 @@ void VU_Thread::VifUnpack(vifStruct& _vif, VIFregisters& _vifRegs, u8* data, u32
 	KickStart();
 }

-void VU_Thread::WriteMicroMem(u32 vu_micro_addr, void* data, u32 size)
+void VU_Thread::WriteMicroMem(u32 vu_micro_addr, const void* data, u32 size)
 {
 	MTVU_LOG("MTVU - WriteMicroMem!");
 	ReserveSpace(3 + size_u32(size));

@@ -498,7 +498,7 @@ void VU_Thread::WriteMicroMem(u32 vu_micro_addr, void* data, u32 size)
 	KickStart();
 }

-void VU_Thread::WriteDataMem(u32 vu_data_addr, void* data, u32 size)
+void VU_Thread::WriteDataMem(u32 vu_data_addr, const void* data, u32 size)
 {
 	MTVU_LOG("MTVU - WriteDataMem!");
 	ReserveSpace(3 + size_u32(size));

@@ -88,13 +88,13 @@ public:

 	void ExecuteVU(u32 vu_addr, u32 vif_top, u32 vif_itop, u32 fbrst);

-	void VifUnpack(vifStruct& _vif, VIFregisters& _vifRegs, u8* data, u32 size);
+	void VifUnpack(vifStruct& _vif, VIFregisters& _vifRegs, const u8* data, u32 size);

 	// Writes to VU's Micro Memory (size in bytes)
-	void WriteMicroMem(u32 vu_micro_addr, void* data, u32 size);
+	void WriteMicroMem(u32 vu_micro_addr, const void* data, u32 size);

 	// Writes to VU's Data Memory (size in bytes)
-	void WriteDataMem(u32 vu_data_addr, void* data, u32 size);
+	void WriteDataMem(u32 vu_data_addr, const void* data, u32 size);

 	void WriteVIRegs(REG_VI* viRegs);

@@ -123,7 +123,7 @@ private:
 	void ReadRegs(VIFregisters* dest);

 	void Write(u32 val);
-	void Write(void* src, u32 size);
+	void Write(const void* src, u32 size);
 	void WriteRegs(VIFregisters* src);

 	u32 Get_vuCycles();

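These writers all funnel through the same pattern: reserve space in the MTVU ring, copy the payload, bump the write cursor, then kick the consumer thread; the const-qualification above just makes explicit that the source buffer is never mutated. A toy single-producer sketch of that ordering (illustrative only, not the MTVU ring; wraparound handling is omitted):

#include <atomic>
#include <cstdint>
#include <cstring>

struct ToyRing
{
	static constexpr size_t kSize = 1 << 16;
	uint8_t buf[kSize];
	std::atomic<size_t> write_pos{0};

	void Write(const void* src, size_t size)
	{
		const size_t pos = write_pos.load(std::memory_order_relaxed);
		std::memcpy(&buf[pos % kSize], src, size); // fill payload first
		// Release ordering publishes the payload before the new cursor,
		// so the consumer never reads half-written data.
		write_pos.store(pos + size, std::memory_order_release);
	}
};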
pcsx2/Memory.cpp
@@ -224,9 +224,9 @@ static mem32_t nullRead32(u32 mem) {
 	MEM_LOG("Read uninstalled memory at address %08x", mem);
 	return 0;
 }
-static RETURNS_R64 nullRead64(u32 mem) {
+static mem64_t nullRead64(u32 mem) {
 	MEM_LOG("Read uninstalled memory at address %08x", mem);
-	return r64_zero();
+	return 0;
 }
 static RETURNS_R128 nullRead128(u32 mem) {
 	MEM_LOG("Read uninstalled memory at address %08x", mem);

@@ -244,11 +244,11 @@ static void nullWrite32(u32 mem, mem32_t value)
 {
 	MEM_LOG("Write uninstalled memory at address %08x", mem);
 }
-static void nullWrite64(u32 mem, const mem64_t *value)
+static void nullWrite64(u32 mem, mem64_t value)
 {
 	MEM_LOG("Write uninstalled memory at address %08x", mem);
 }
-static void nullWrite128(u32 mem, const mem128_t *value)
+static void TAKES_R128 nullWrite128(u32 mem, r128 value)
 {
 	MEM_LOG("Write uninstalled memory at address %08x", mem);
 }

@@ -328,18 +328,18 @@ static mem32_t _ext_memRead32(u32 mem)
 }

 template<int p>
-static RETURNS_R64 _ext_memRead64(u32 mem)
+static u64 _ext_memRead64(u32 mem)
 {
 	switch (p)
 	{
 		case 6: // gsm
-			return r64_from_u64(gsRead64(mem));
+			return gsRead64(mem);
 		default: break;
 	}

 	MEM_LOG("Unknown Memory read64 from address %8.8x", mem);
 	cpuTlbMissR(mem, cpuRegs.branch);
-	return r64_zero();
+	return 0;
 }

 template<int p>

@@ -416,7 +416,7 @@ static void _ext_memWrite32(u32 mem, mem32_t value)
 }

 template<int p>
-static void _ext_memWrite64(u32 mem, const mem64_t* value)
+static void _ext_memWrite64(u32 mem, mem64_t value)
 {
 	/*switch (p) {

@@ -427,12 +427,12 @@ static void _ext_memWrite64(u32 mem, const mem64_t* value)
 	//	gsWrite64(mem & ~0xa0000000, *value); return;
 	}*/

-	MEM_LOG("Unknown Memory write64 to address %x with data %8.8x_%8.8x", mem, (u32)(*value>>32), (u32)*value);
+	MEM_LOG("Unknown Memory write64 to address %x with data %8.8x_%8.8x", mem, (u32)(value>>32), (u32)value);
 	cpuTlbMissW(mem, cpuRegs.branch);
 }

 template<int p>
-static void _ext_memWrite128(u32 mem, const mem128_t *value)
+static void TAKES_R128 _ext_memWrite128(u32 mem, r128 value)
 {
 	/*switch (p) {
 	//case 1: // hwm

@@ -444,7 +444,8 @@ static void _ext_memWrite128(u32 mem, const mem128_t *value)
 	//	gsWrite64(mem+8, value[1]); return;
 	}*/

-	MEM_LOG("Unknown Memory write128 to address %x with data %8.8x_%8.8x_%8.8x_%8.8x", mem, ((u32*)value)[3], ((u32*)value)[2], ((u32*)value)[1], ((u32*)value)[0]);
+	alignas(16) const u128 uvalue = r128_to_u128(value);
+	MEM_LOG("Unknown Memory write128 to address %x with data %8.8x_%8.8x_%8.8x_%8.8x", mem, uvalue._u32[3], uvalue._u32[2], uvalue._u32[1], uvalue._u32[0]);
 	cpuTlbMissW(mem, cpuRegs.branch);
 }

@@ -480,12 +481,12 @@ template<int vunum> static mem32_t vuMicroRead32(u32 addr) {
 	if (vunum && THREAD_VU1) vu1Thread.WaitVU();
 	return *(u32*)&vu->Micro[addr];
 }
-template<int vunum> static RETURNS_R64 vuMicroRead64(u32 addr) {
+template<int vunum> static mem64_t vuMicroRead64(u32 addr) {
 	VURegs* vu = vunum ? &VU1 : &VU0;
 	addr &= vunum ? 0x3fff: 0xfff;

 	if (vunum && THREAD_VU1) vu1Thread.WaitVU();
-	return r64_load(&vu->Micro[addr]);
+	return *(u64*)&vu->Micro[addr];
 }
 template<int vunum> static RETURNS_R128 vuMicroRead128(u32 addr) {
 	VURegs* vu = vunum ? &VU1 : &VU0;

@@ -536,31 +537,33 @@ template<int vunum> static void vuMicroWrite32(u32 addr, mem32_t data) {
 		*(u32*)&vu->Micro[addr] =data;
 	}
 }
-template<int vunum> static void vuMicroWrite64(u32 addr, const mem64_t* data) {
+template<int vunum> static void vuMicroWrite64(u32 addr, mem64_t data) {
 	VURegs* vu = vunum ? &VU1 : &VU0;
 	addr &= vunum ? 0x3fff: 0xfff;

 	if (vunum && THREAD_VU1) {
-		vu1Thread.WriteMicroMem(addr, (void*)data, sizeof(u64));
+		vu1Thread.WriteMicroMem(addr, &data, sizeof(u64));
 		return;
 	}

-	if (*(u64*)&vu->Micro[addr]!=data[0]) {
+	if (*(u64*)&vu->Micro[addr]!=data) {
 		ClearVuFunc<vunum>(addr, 8);
-		*(u64*)&vu->Micro[addr] =data[0];
+		*(u64*)&vu->Micro[addr] =data;
 	}
 }
-template<int vunum> static void vuMicroWrite128(u32 addr, const mem128_t* data) {
+template<int vunum> static void TAKES_R128 vuMicroWrite128(u32 addr, r128 data) {
 	VURegs* vu = vunum ? &VU1 : &VU0;
 	addr &= vunum ? 0x3fff: 0xfff;

+	const u128 udata = r128_to_u128(data);
+
 	if (vunum && THREAD_VU1) {
-		vu1Thread.WriteMicroMem(addr, (void*)data, sizeof(u128));
+		vu1Thread.WriteMicroMem(addr, &udata, sizeof(u128));
 		return;
 	}
-	if ((u128&)vu->Micro[addr]!=*data) {
+	if ((u128&)vu->Micro[addr]!=udata) {
 		ClearVuFunc<vunum>(addr, 16);
-		CopyQWC(&vu->Micro[addr],data);
+		r128_store_unaligned(&vu->Micro[addr],data);
 	}
 }

@@ -583,11 +586,11 @@ template<int vunum> static mem32_t vuDataRead32(u32 addr) {
 	if (vunum && THREAD_VU1) vu1Thread.WaitVU();
 	return *(u32*)&vu->Mem[addr];
 }
-template<int vunum> static RETURNS_R64 vuDataRead64(u32 addr) {
+template<int vunum> static mem64_t vuDataRead64(u32 addr) {
 	VURegs* vu = vunum ? &VU1 : &VU0;
 	addr &= vunum ? 0x3fff: 0xfff;
 	if (vunum && THREAD_VU1) vu1Thread.WaitVU();
-	return r64_load(&vu->Mem[addr]);
+	return *(u64*)&vu->Mem[addr];
 }
 template<int vunum> static RETURNS_R128 vuDataRead128(u32 addr) {
 	VURegs* vu = vunum ? &VU1 : &VU0;

@@ -624,23 +627,24 @@ template<int vunum> static void vuDataWrite32(u32 addr, mem32_t data) {
 	}
 	*(u32*)&vu->Mem[addr] = data;
 }
-template<int vunum> static void vuDataWrite64(u32 addr, const mem64_t* data) {
+template<int vunum> static void vuDataWrite64(u32 addr, mem64_t data) {
 	VURegs* vu = vunum ? &VU1 : &VU0;
 	addr &= vunum ? 0x3fff: 0xfff;
 	if (vunum && THREAD_VU1) {
-		vu1Thread.WriteDataMem(addr, (void*)data, sizeof(u64));
+		vu1Thread.WriteDataMem(addr, &data, sizeof(u64));
 		return;
 	}
-	*(u64*)&vu->Mem[addr] = data[0];
+	*(u64*)&vu->Mem[addr] = data;
 }
-template<int vunum> static void vuDataWrite128(u32 addr, const mem128_t* data) {
+template<int vunum> static void TAKES_R128 vuDataWrite128(u32 addr, r128 data) {
 	VURegs* vu = vunum ? &VU1 : &VU0;
 	addr &= vunum ? 0x3fff: 0xfff;
 	if (vunum && THREAD_VU1) {
-		vu1Thread.WriteDataMem(addr, (void*)data, sizeof(u128));
+		alignas(16) const u128 udata = r128_to_u128(data);
+		vu1Thread.WriteDataMem(addr, &udata, sizeof(u128));
 		return;
 	}
-	CopyQWC(&vu->Mem[addr], data);
+	r128_store_unaligned(&vu->Mem[addr], data);
 }

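A recurring idiom in the two hunks above: the 128-bit value now arrives in an XMM register, so when a byte pointer is needed (the MTVU ring takes `const void*`), it is first spilled to an aligned `u128` on the stack. A self-contained SSE2 sketch of that spill, with plain stand-ins for the pcsx2 typedefs:

#include <emmintrin.h>
#include <cstddef>
#include <cstdint>

struct u128_t { uint64_t lo, hi; }; // stand-in for pcsx2's u128

// Placeholder for a memcpy-style consumer such as vu1Thread.WriteDataMem.
static void consume_bytes(const void* p, size_t n) { (void)p; (void)n; }

void write128(__m128i value)
{
	alignas(16) u128_t spilled;
	// Aligned store: the register value becomes addressable bytes,
	// as r128_to_u128() does before the WriteDataMem call above.
	_mm_store_si128(reinterpret_cast<__m128i*>(&spilled), value);
	consume_bytes(&spilled, sizeof(spilled));
}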
@@ -712,30 +716,31 @@ void memBindConditionalHandlers()
 // eeMemoryReserve  (implementations)
 // --------------------------------------------------------------------------------------
 eeMemoryReserve::eeMemoryReserve()
-	: _parent( "EE Main Memory", sizeof(*eeMem) )
+	: _parent("EE Main Memory")
 {
 }

-void eeMemoryReserve::Reserve(VirtualMemoryManagerPtr allocator)
+eeMemoryReserve::~eeMemoryReserve()
 {
-	_parent::Reserve(std::move(allocator), HostMemoryMap::EEmemOffset);
-	//_parent::Reserve(EmuConfig.HostMap.IOP);
+	Release();
 }

-void eeMemoryReserve::Commit()
+void eeMemoryReserve::Assign(VirtualMemoryManagerPtr allocator)
 {
-	_parent::Commit();
-	eeMem = (EEVM_MemoryAllocMess*)m_reserve.GetPtr();
+	_parent::Assign(std::move(allocator), HostMemoryMap::EEmemOffset, sizeof(*eeMem));
+	eeMem = reinterpret_cast<EEVM_MemoryAllocMess*>(GetPtr());
+
+	if (!mmap_faultHandler)
+	{
+		pxAssert(Source_PageFault);
+		mmap_faultHandler = new mmap_PageFaultHandler();
+	}
 }

 // Resets memory mappings, unmaps TLBs, reloads bios roms, etc.
 void eeMemoryReserve::Reset()
 {
-	if(!mmap_faultHandler) {
-		pxAssert(Source_PageFault);
-		mmap_faultHandler = new mmap_PageFaultHandler();
-	}
-
 	_parent::Reset();

 	// Note!! Ideally the vtlb should only be initialized once, and then subsequent

@@ -860,23 +865,18 @@ void eeMemoryReserve::Reset()
 	constexpr bool needs_bios = true;
 #endif

+	// TODO(Stenzek): Move BIOS loading out and far away...
 	if (needs_bios && !LoadBIOS())
 		pxFailRel("Failed to load BIOS");
 }

-void eeMemoryReserve::Decommit()
-{
-	_parent::Decommit();
-	eeMem = NULL;
-}
-
-eeMemoryReserve::~eeMemoryReserve()
+void eeMemoryReserve::Release()
 {
 	safe_delete(mmap_faultHandler);
 	vtlb_Term();
 	eeMem = nullptr;
+	_parent::Release();
 }

 // ===========================================================================================
 //  Memory Protection and Block Checking, vtlb Style!
 // ===========================================================================================

@@ -915,7 +915,7 @@ struct vtlb_PageProtectionInfo
 	vtlb_ProtectionMode Mode;
 };

-alignas(16) static vtlb_PageProtectionInfo m_PageProtectInfo[Ps2MemSize::MainRam >> 12];
+alignas(16) static vtlb_PageProtectionInfo m_PageProtectInfo[Ps2MemSize::MainRam >> __pageshift];

 // returns:

@@ -931,10 +931,10 @@ vtlb_ProtectionMode mmap_GetRamPageInfo( u32 paddr )
 	uptr ptr = (uptr)PSM( paddr );
 	uptr rampage = ptr - (uptr)eeMem->Main;

-	if (rampage >= Ps2MemSize::MainRam)
+	if (!ptr || rampage >= Ps2MemSize::MainRam)
 		return ProtMode_NotRequired; //not in ram, no tracking done ...

-	rampage >>= 12;
+	rampage >>= __pageshift;

 	return m_PageProtectInfo[rampage].Mode;
 }

@@ -944,10 +944,10 @@ void mmap_MarkCountedRamPage( u32 paddr )
 {
 	pxAssert( eeMem );

-	paddr &= ~0xfff;
+	paddr &= ~__pagemask;

 	uptr ptr = (uptr)PSM( paddr );
-	int rampage = (ptr - (uptr)eeMem->Main) >> 12;
+	int rampage = (ptr - (uptr)eeMem->Main) >> __pageshift;

 	// Important: Update the ReverseRamMap here because TLB changes could alter the paddr
 	// mapping into eeMem->Main.

@@ -959,11 +959,11 @@ void mmap_MarkCountedRamPage( u32 paddr )

 	eeRecPerfLog.Write( (m_PageProtectInfo[rampage].Mode == ProtMode_Manual) ?
 		"Re-protecting page @ 0x%05x" : "Protected page @ 0x%05x",
-		paddr>>12
+		paddr>>__pageshift
 	);

 	m_PageProtectInfo[rampage].Mode = ProtMode_Write;
-	HostSys::MemProtect( &eeMem->Main[rampage<<12], __pagesize, PageAccess_ReadOnly() );
+	HostSys::MemProtect( &eeMem->Main[rampage<<__pageshift], __pagesize, PageAccess_ReadOnly() );
 }

 // offset - offset of address relative to psM.

@@ -973,16 +973,16 @@ static __fi void mmap_ClearCpuBlock( uint offset )
 {
 	pxAssert( eeMem );

-	int rampage = offset >> 12;
+	int rampage = offset >> __pageshift;

 	// Assertion: This function should never be run on a block that's already under
 	// manual protection.  Indicates a logic error in the recompiler or protection code.
 	pxAssertMsg( m_PageProtectInfo[rampage].Mode != ProtMode_Manual,
 		"Attempted to clear a block that is already under manual protection." );

-	HostSys::MemProtect( &eeMem->Main[rampage<<12], __pagesize, PageAccess_ReadWrite() );
+	HostSys::MemProtect( &eeMem->Main[rampage<<__pageshift], __pagesize, PageAccess_ReadWrite() );
 	m_PageProtectInfo[rampage].Mode = ProtMode_Manual;
-	Cpu->Clear( m_PageProtectInfo[rampage].ReverseRamMap, 0x400 );
+	Cpu->Clear( m_PageProtectInfo[rampage].ReverseRamMap, __pagesize );
 }

 void mmap_PageFaultHandler::OnPageFaultEvent( const PageFaultInfo& info, bool& handled )

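Replacing the hard-coded 4K constants with `__pageshift`/`__pagemask` keeps the block-tracking arithmetic consistent on hosts with larger pages. The identities behind the shifts and masks are worth spelling out; a small sketch, assuming a conventional 4KiB page:

#include <cassert>
#include <cstdint>

int main()
{
	constexpr uintptr_t kPageSize  = 4096;
	constexpr uintptr_t kPageShift = 12;            // log2(kPageSize)
	constexpr uintptr_t kPageMask  = kPageSize - 1; // 0xfff

	const uintptr_t addr = 0x00123abc;

	// `paddr &= ~__pagemask` rounds down to the page boundary:
	assert((addr & ~kPageMask) == (addr >> kPageShift) << kPageShift);
	// `rampage = offset >> __pageshift` is the page index used to
	// subscript m_PageProtectInfo; shifting back gives the page base:
	assert(((addr >> kPageShift) << kPageShift) + (addr & kPageMask) == addr);
	return 0;
}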
@@ -131,21 +131,18 @@ extern void mmap_ResetBlockTracking();
 #define memRead8 vtlb_memRead<mem8_t>
 #define memRead16 vtlb_memRead<mem16_t>
 #define memRead32 vtlb_memRead<mem32_t>
+#define memRead64 vtlb_memRead<mem64_t>

 #define memWrite8 vtlb_memWrite<mem8_t>
 #define memWrite16 vtlb_memWrite<mem16_t>
 #define memWrite32 vtlb_memWrite<mem32_t>
-
-static __fi void memRead64(u32 mem, mem64_t* out) { _mm_storel_epi64((__m128i*)out, vtlb_memRead64(mem)); }
-static __fi void memRead64(u32 mem, mem64_t& out) { memRead64(mem, &out); }
+#define memWrite64 vtlb_memWrite<mem64_t>

 static __fi void memRead128(u32 mem, mem128_t* out) { _mm_store_si128((__m128i*)out, vtlb_memRead128(mem)); }
 static __fi void memRead128(u32 mem, mem128_t& out) { memRead128(mem, &out); }

-static __fi void memWrite64(u32 mem, const mem64_t* val) { vtlb_memWrite64(mem, val); }
-static __fi void memWrite64(u32 mem, const mem64_t& val) { vtlb_memWrite64(mem, &val); }
-static __fi void memWrite128(u32 mem, const mem128_t* val) { vtlb_memWrite128(mem, val); }
-static __fi void memWrite128(u32 mem, const mem128_t& val) { vtlb_memWrite128(mem, &val); }
+static __fi void memWrite128(u32 mem, const mem128_t* val) { vtlb_memWrite128(mem, r128_load(val)); }
+static __fi void memWrite128(u32 mem, const mem128_t& val) { vtlb_memWrite128(mem, r128_load(&val)); }

 extern u16 ba0R16(u32 mem);

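With `memRead64` now an ordinary value-returning template, call sites shrink from an out-parameter dance with an SSE store to a plain expression. A minimal before/after sketch, using a toy stand-in for the templated vtlb read:

#include <cstdint>

// Toy stand-in for vtlb_memRead<u64>; the real one dispatches through the VTLB.
template <typename T>
static T toy_memRead(uint32_t addr) { return static_cast<T>(addr); }

uint64_t example(uint32_t addr)
{
	// Old style (removed above): u64 mem; memRead64(addr, &mem);
	// New style: the value comes back in a general-purpose register.
	const uint64_t mem = toy_memRead<uint64_t>(addr);
	return mem;
}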
@@ -38,50 +38,6 @@ typedef u32 mem32_t;
 typedef u64 mem64_t;
 typedef u128 mem128_t;

-
-// --------------------------------------------------------------------------------------
-//  Future-Planned VTLB pagefault scheme!
-// --------------------------------------------------------------------------------------
-// When enabled, the VTLB will use a large-area reserved memory range of 512megs for EE
-// physical ram/rom access.  The base ram will be committed at 0x00000000, and ROMs will be
-// at 0x1fc00000, etc.  All memory ranges in between will be uncommitted memory -- which
-// means that the memory will *not* count against the operating system's physical memory
-// pool.
-//
-// When the VTLB generates memory operations (loads/stores), it will assume that the op
-// is addressing either RAM or ROM, and by assuming that it can generate a completely efficient
-// direct memory access (one AND and one MOV instruction).  If the access is to another area of
-// memory, such as hardware registers or scratchpad, the access will generate a page fault, the
-// compiled block will be cleared and re-compiled using "full" VTLB translation logic.
-//
-// Note that support for this feature may not be doable under x86/32 platforms, due to the
-// 2gb/3gb limit of Windows XP (the 3gb feature will make it slightly more feasible at least).
-//
-#define VTLB_UsePageFaulting 0
-
-#if VTLB_UsePageFaulting
-
-// The order of the components in this struct *matter* -- it has been laid out so that the
-// full breadth of PS2 RAM and ROM mappings are directly supported.
-struct EEVM_MemoryAllocMess
-{
-	u8 (&Main)[Ps2MemSize::MainRam];	// Main memory (hard-wired to 32MB)
-
-	u8 _padding1[0x1e000000-Ps2MemSize::MainRam];
-	u8 (&ROM1)[Ps2MemSize::Rom1];		// DVD player
-
-	u8 _padding2[0x1e040000-(0x1e000000+Ps2MemSize::Rom1)];
-	u8 (&EROM)[Ps2MemSize::ERom];		// DVD player extensions
-
-	u8 _padding3[0x1e400000-(0x1e040000+Ps2MemSize::EROM)];
-	u8 (&ROM2)[Ps2MemSize::Rom2];		// Chinese extensions
-
-	u8 _padding4[0x1fc00000-(0x1e040000+Ps2MemSize::Rom2)];
-	u8 (&ROM)[Ps2MemSize::Rom];			// Boot rom (4MB)
-};
-
-#else
-
 struct EEVM_MemoryAllocMess
 {
 	u8 Main[Ps2MemSize::MainRam];		// Main memory (hard-wired to 32MB)

@@ -100,8 +56,6 @@ struct EEVM_MemoryAllocMess
 	u8 ZeroWrite[_1mb];
 };

-#endif
-
 struct IopVM_MemoryAllocMess
 {
 	u8 Main[Ps2MemSize::IopRam];		// Main memory (hard-wired to 2MB)

@@ -329,8 +329,7 @@ PINEServer::IPCBuffer PINEServer::ParseCommand(char* buf, char* ret_buffer, u32
 				if (!SafetyChecks(buf_cnt, 4, ret_cnt, 8, buf_size))
 					goto error;
 				const u32 a = FromArray<u32>(&buf[buf_cnt], 0);
-				u64 res = 0;
-				memRead64(a, &res);
+				const u64 res = memRead64(a);
 				ToArray(ret_buffer, res, ret_cnt);
 				ret_cnt += 8;
 				buf_cnt += 4;

@@ -388,7 +388,6 @@ void handle_extended_t(IniPatch *p)
 // Patch.cpp itself declares this prototype, so make sure to keep in sync.
 void _ApplyPatch(IniPatch *p)
 {
-	u64 mem = 0;
 	u64 ledata = 0;

 	if (p->enabled == 0) return;

@@ -414,9 +413,8 @@ void _ApplyPatch(IniPatch *p)
 			break;

 		case DOUBLE_T:
-			memRead64(p->addr, &mem);
-			if (mem != p->data)
-				memWrite64(p->addr, &p->data);
+			if (memRead64(p->addr) != (u64)p->data)
+				memWrite64(p->addr, (u64)p->data);
 			break;

 		case EXTENDED_T:

@@ -437,9 +435,8 @@ void _ApplyPatch(IniPatch *p)

 		case DOUBLE_LE_T:
 			ledata = SwapEndian(p->data, 64);
-			memRead64(p->addr, &mem);
-			if (mem != ledata)
-				memWrite64(p->addr, ledata);
+			if (memRead64(p->addr) != (u64)ledata)
+				memWrite64(p->addr, (u64)ledata);
 			break;

 		default:

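The DOUBLE_LE_T case byteswaps the patch payload before the read-compare-write. For reference, a self-contained sketch of a portable 64-bit byteswap performing the operation `SwapEndian(p->data, 64)` is responsible for (pcsx2 has its own helper; this is illustrative):

#include <cstdint>

constexpr uint64_t bswap64(uint64_t v)
{
	// Swap bytes within 16-bit lanes, then 16-bit halves within 32-bit
	// lanes, then the two 32-bit halves.
	v = ((v & 0x00ff00ff00ff00ffULL) << 8)  | ((v >> 8)  & 0x00ff00ff00ff00ffULL);
	v = ((v & 0x0000ffff0000ffffULL) << 16) | ((v >> 16) & 0x0000ffff0000ffffULL);
	return (v << 32) | (v >> 32);
}

static_assert(bswap64(0x0102030405060708ULL) == 0x0807060504030201ULL);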
@@ -187,9 +187,6 @@ struct R3000Acpu {
 	s32 (*ExecuteBlock)( s32 eeCycles );		// executes the given number of EE cycles.
 	void (*Clear)(u32 Addr, u32 Size);
 	void (*Shutdown)();
-
-	uint (*GetCacheReserve)();
-	void (*SetCacheReserve)( uint reserveInMegs );
 };

 extern R3000Acpu *psxCpu;

@@ -308,22 +308,10 @@ static void intClear(u32 Addr, u32 Size) {
 static void intShutdown() {
 }

-static void intSetCacheReserve( uint reserveInMegs )
-{
-}
-
-static uint intGetCacheReserve()
-{
-	return 0;
-}
-
 R3000Acpu psxInt = {
 	intReserve,
 	intReset,
 	intExecuteBlock,
 	intClear,
-	intShutdown,
-
-	intGetCacheReserve,
-	intSetCacheReserve
+	intShutdown
 };

@@ -81,10 +81,11 @@ extern SysMainMemory& GetVmMemory();
 void cpuReset()
 {
 	vu1Thread.WaitVU();
+	vu1Thread.Reset();
 	if (GetMTGS().IsOpen())
 		GetMTGS().WaitGS(); // GS better be done processing before we reset the EE, just in case.

-	GetVmMemory().ResetAll();
+	GetVmMemory().Reset();

 	memzero(cpuRegs);
 	memzero(fpuRegs);

@@ -116,6 +117,8 @@ void cpuReset()
 	ElfCRC = 0;
 	DiscSerial.clear();
 	ElfEntry = -1;
+	g_GameStarted = false;
+	g_GameLoading = false;

 	// Probably not the right place, but it has to be done when the ram is actually initialized
 	USBsetRAM(iopMem->Main);

@@ -382,9 +382,6 @@ struct R5900cpu
 	// doesn't matter if we're stripping it out soon.  ;)
 	//
 	void (*Clear)(u32 Addr, u32 Size);
-
-	uint (*GetCacheReserve)();
-	void (*SetCacheReserve)( uint reserveInMegs );
 };

 extern R5900cpu *Cpu;

@@ -667,7 +667,7 @@ void LD()
 	if( addr & 7 )
 		throw R5900Exception::AddressError( addr, false );

-	memRead64(addr, (u64*)gpr_GetWritePtr(_Rt_));
+	cpuRegs.GPR.r[_Rt_].UD[0] = memRead64(addr);
 }

 static const u64 LDL_MASK[8] =

@@ -688,8 +688,7 @@ void LDL()
 	u32 addr = cpuRegs.GPR.r[_Rs_].UL[0] + _Imm_;
 	u32 shift = addr & 7;

-	u64 mem;
-	memRead64(addr & ~7, &mem);
+	u64 mem = memRead64(addr & ~7);

 	if( !_Rt_ ) return;
 	cpuRegs.GPR.r[_Rt_].UD[0] =	(cpuRegs.GPR.r[_Rt_].UD[0] & LDL_MASK[shift]) |

@@ -701,8 +700,7 @@ void LDR()
 	u32 addr = cpuRegs.GPR.r[_Rs_].UL[0] + _Imm_;
 	u32 shift = addr & 7;

-	u64 mem;
-	memRead64(addr & ~7, &mem);
+	u64 mem = memRead64(addr & ~7);

 	if (!_Rt_) return;
 	cpuRegs.GPR.r[_Rt_].UD[0] = (cpuRegs.GPR.r[_Rt_].UD[0] & LDR_MASK[shift]) |

@@ -798,7 +796,7 @@ void SD()
 	if( addr & 7 )
 		throw R5900Exception::AddressError( addr, true );

-	memWrite64(addr,&cpuRegs.GPR.r[_Rt_].UD[0]);
+	memWrite64(addr,cpuRegs.GPR.r[_Rt_].UD[0]);
 }

 static const u64 SDL_MASK[8] =

@@ -817,12 +815,10 @@ void SDL()
 {
 	u32 addr = cpuRegs.GPR.r[_Rs_].UL[0] + _Imm_;
 	u32 shift = addr & 7;
-	u64 mem;

-	memRead64(addr & ~7, &mem);
+	u64 mem = memRead64(addr & ~7);
 	mem = (cpuRegs.GPR.r[_Rt_].UD[0] >> SDL_SHIFT[shift]) |
 		  (mem & SDL_MASK[shift]);
-	memWrite64(addr & ~7, &mem);
+	memWrite64(addr & ~7, mem);
 }

@@ -830,12 +826,10 @@ void SDR()
 {
 	u32 addr = cpuRegs.GPR.r[_Rs_].UL[0] + _Imm_;
 	u32 shift = addr & 7;
-	u64 mem;

-	memRead64(addr & ~7, &mem);
+	u64 mem = memRead64(addr & ~7);
 	mem = (cpuRegs.GPR.r[_Rt_].UD[0] << SDR_SHIFT[shift]) |
 		  (mem & SDR_MASK[shift]);
-	memWrite64(addr & ~7, &mem );
+	memWrite64(addr & ~7, mem );
 }

 void SQ()

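These opcodes are why the 64-bit path moving to GPRs pays off: LDL/LDR/SDL/SDR are pure mask-and-shift merges over an aligned doubleword, which is ordinary integer ALU work. A worked example of the LDL merge, following the standard little-endian LDL definition (the emulator's LDL_MASK/LDL_SHIFT tables encode the same mask/shift values):

#include <cassert>
#include <cstdint>

int main()
{
	// LDL with (addr & 7) == 4: splice the 5 low-order bytes of the
	// aligned doubleword into the high-order bytes of rt, keeping
	// rt's low 3 bytes untouched.
	const uint64_t rt  = 0x1111111111111111ULL; // destination GPR
	const uint64_t mem = 0x8877665544332211ULL; // *(u64*)(addr & ~7)

	const unsigned shift = 4;                               // addr & 7
	const uint64_t mask  = (1ULL << (8 * (7 - shift))) - 1; // keep low bytes
	const uint64_t out   = (rt & mask) | (mem << (8 * (7 - shift)));

	assert(out == 0x5544332211111111ULL);
	return 0;
}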
@@ -21,58 +21,46 @@

 #pragma once

 #include <cstring>
-#include <immintrin.h>
+#include <emmintrin.h>

 // Can't stick them in structs because it breaks calling convention things, yay
-using r64 = __m128i;
 using r128 = __m128i;

 // Calling convention setting, yay
-#define RETURNS_R64 r64 __vectorcall
 #define RETURNS_R128 r128 __vectorcall
-#define TAKES_R64 __vectorcall
 #define TAKES_R128 __vectorcall

 // And since we can't stick them in structs, we get lots of static methods, yay!

-__forceinline static r64 r64_load(const void* ptr)
-{
-	return _mm_loadl_epi64(reinterpret_cast<const r64*>(ptr));
-}
-
-__forceinline static r64 r64_zero()
-{
-	return _mm_setzero_si128();
-}
-
-__forceinline static r64 r64_from_u32(u32 val)
-{
-	return _mm_cvtsi32_si128(val);
-}
-
-__forceinline static r64 r64_from_u32x2(u32 lo, u32 hi)
-{
-	return _mm_unpacklo_epi32(_mm_cvtsi32_si128(lo), _mm_cvtsi32_si128(hi));
-}
-
-__forceinline static r64 r64_from_u64(u64 val)
-{
-	return _mm_cvtsi64_si128(val);
-}
-
 __forceinline static r128 r128_load(const void* ptr)
 {
 	return _mm_load_si128(reinterpret_cast<const r128*>(ptr));
 }

 __forceinline static void r128_store(void* ptr, r128 val)
 {
 	return _mm_store_si128(reinterpret_cast<r128*>(ptr), val);
 }

 __forceinline static void r128_store_unaligned(void* ptr, r128 val)
 {
 	return _mm_storeu_si128(reinterpret_cast<r128*>(ptr), val);
 }

 __forceinline static r128 r128_zero()
 {
 	return _mm_setzero_si128();
 }

-/// Expects that r64 came from r64-handling code, and not from a recompiler or something
-__forceinline static r128 r128_from_r64_clean(r64 val)
+__forceinline static r128 r128_from_u64_dup(u64 val)
 {
-	return val;
+	return _mm_set1_epi64x(val);
+}
+
+__forceinline static r128 r128_from_u64_zext(u64 val)
+{
+	return _mm_set_epi64x(0, val);
 }

 __forceinline static r128 r128_from_u32x4(u32 lo0, u32 lo1, u32 hi0, u32 hi1)

@@ -80,17 +68,31 @@ __forceinline static r128 r128_from_u32x4(u32 lo0, u32 lo1, u32 hi0, u32 hi1)
 	return _mm_setr_epi32(lo0, lo1, hi0, hi1);
 }

+__forceinline static r128 r128_from_u128(const u128& u)
+{
+	return _mm_loadu_si128(reinterpret_cast<const __m128i*>(&u));
+}
+
 __forceinline static u32 r128_to_u32(r128 val)
 {
 	return _mm_cvtsi128_si32(val);
 }

+__forceinline static u64 r128_to_u64(r128 val)
+{
+	return _mm_cvtsi128_si64(val);
+}
+
 __forceinline static u128 r128_to_u128(r128 val)
 {
 	alignas(16) u128 ret;
 	_mm_store_si128(reinterpret_cast<r128*>(&ret), val);
 	return ret;
 }

 template <typename u>
 struct rhelper;

 template <>
-struct rhelper<u64>
-{
-	using r = r64;
-	__forceinline static r load(void* ptr) { return r64_load(ptr); }
-	__forceinline static r zero() { return r64_zero(); }
-};
-
-template <>
 struct rhelper<u128>
 {

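With the r64 layer deleted, 64-bit values travel in GPRs and only genuine 128-bit quantities use XMM registers; the surviving helpers are thin wrappers over SSE2 intrinsics. A self-contained round trip using the same intrinsics the r128_* helpers wrap (plain `__m128i`/`uint64_t` in place of pcsx2's `r128`/`u128`):

#include <emmintrin.h>
#include <cassert>
#include <cstdint>

int main()
{
	const uint64_t lo = 0x1122334455667788ULL;

	__m128i v = _mm_set_epi64x(0, lo);                     // r128_from_u64_zext
	assert(_mm_cvtsi128_si64(v) == (int64_t)lo);           // r128_to_u64

	alignas(16) uint64_t spill[2];                         // r128_to_u128
	_mm_store_si128(reinterpret_cast<__m128i*>(spill), v); // aligned store
	assert(spill[0] == lo && spill[1] == 0);
	return 0;
}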
pcsx2/System.cpp
@@ -24,6 +24,7 @@

 #include "System/RecTypes.h"

+#include "common/Align.h"
 #include "common/MemsetFast.inl"
 #include "common/Perf.h"
 #include "common/StringUtil.h"

|
|||
// Constructor!
|
||||
// Parameters:
|
||||
// name - a nice long name that accurately describes the contents of this reserve.
|
||||
RecompiledCodeReserve::RecompiledCodeReserve( std::string name, uint defCommit )
|
||||
: VirtualMemoryReserve( std::move(name), defCommit )
|
||||
RecompiledCodeReserve::RecompiledCodeReserve(std::string name)
|
||||
: VirtualMemoryReserve(std::move(name))
|
||||
{
|
||||
m_prot_mode = PageAccess_Any();
|
||||
}
|
||||
|
||||
RecompiledCodeReserve::~RecompiledCodeReserve()
|
||||
{
|
||||
_termProfiler();
|
||||
Release();
|
||||
}
|
||||
|
||||
void RecompiledCodeReserve::_registerProfiler()
|
||||
{
|
||||
if (m_profiler_name.empty() || !IsOk()) return;
|
||||
if (m_profiler_name.empty() || !IsOk())
|
||||
return;
|
||||
|
||||
Perf::any.map((uptr)m_baseptr, GetReserveSizeInBytes(), m_profiler_name.c_str());
|
||||
Perf::any.map((uptr)m_baseptr, m_size, m_profiler_name.c_str());
|
||||
}
|
||||
|
||||
void RecompiledCodeReserve::_termProfiler()
|
||||
void RecompiledCodeReserve::Assign(VirtualMemoryManagerPtr allocator, size_t offset, size_t size)
|
||||
{
|
||||
}
|
||||
// Anything passed to the memory allocator must be page aligned.
|
||||
size = Common::PageAlign(size);
|
||||
|
||||
void* RecompiledCodeReserve::Assign( VirtualMemoryManagerPtr allocator, void *baseptr, size_t size )
|
||||
{
|
||||
if (!_parent::Assign(std::move(allocator), baseptr, size)) return NULL;
|
||||
|
||||
Commit();
|
||||
// Since the memory has already been allocated as part of the main memory map, this should never fail.
|
||||
u8* base = allocator->Alloc(offset, size);
|
||||
if (!base)
|
||||
{
|
||||
Console.WriteLn("(RecompiledCodeReserve) Failed to allocate %zu bytes for %s at offset %zu", size, m_name.c_str(), offset);
|
||||
pxFailRel("RecompiledCodeReserve allocation failed.");
|
||||
}
|
||||
|
||||
VirtualMemoryReserve::Assign(std::move(allocator), base, size);
|
||||
_registerProfiler();
|
||||
|
||||
return m_baseptr;
|
||||
}
|
||||
|
||||
void RecompiledCodeReserve::Reset()
|
||||
{
|
||||
_parent::Reset();
|
||||
|
||||
Commit();
|
||||
}
|
||||
|
||||
bool RecompiledCodeReserve::Commit()
|
||||
{
|
||||
bool status = _parent::Commit();
|
||||
|
||||
if (IsDevBuild && m_baseptr)
|
||||
{
|
||||
// Clear the recompiled code block to 0xcc (INT3) -- this helps disasm tools show
|
||||
// the assembly dump more cleanly. We don't clear the block on Release builds since
|
||||
// it can add a noticeable amount of overhead to large block recompilations.
|
||||
|
||||
memset(m_baseptr, 0xCC, m_pages_commited * __pagesize);
|
||||
std::memset(m_baseptr, 0xCC, m_size);
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
void RecompiledCodeReserve::AllowModification()
|
||||
{
|
||||
HostSys::MemProtect(m_baseptr, m_size, PageAccess_Any());
|
||||
}
|
||||
|
||||
void RecompiledCodeReserve::ForbidModification()
|
||||
{
|
||||
HostSys::MemProtect(m_baseptr, m_size, PageProtectionMode().Read().Execute());
|
||||
}
|
||||
|
||||
// Sets the abbreviated name used by the profiler. Name should be under 10 characters long.
|
||||
// After a name has been set, a profiler source will be automatically registered and cleared
|
||||
// in accordance with changes in the reserve area.
|
||||
RecompiledCodeReserve& RecompiledCodeReserve::SetProfilerName( std::string shortname )
|
||||
RecompiledCodeReserve& RecompiledCodeReserve::SetProfilerName(std::string name)
|
||||
{
|
||||
m_profiler_name = std::move(shortname);
|
||||
m_profiler_name = std::move(name);
|
||||
_registerProfiler();
|
||||
return *this;
|
||||
}
|
||||
|
||||
// This error message is shared by R5900, R3000, and microVU recompilers.
|
||||
void RecompiledCodeReserve::ThrowIfNotOk() const
|
||||
{
|
||||
if (IsOk()) return;
|
||||
|
||||
throw Exception::OutOfMemory(m_name)
|
||||
.SetDiagMsg("Recompiled code cache could not be mapped.")
|
||||
.SetUserMsg("This recompiler was unable to reserve contiguous memory required for internal caches. This error can be caused by low virtual memory resources, such as a small or disabled swapfile, or by another program that is hogging a lot of memory.");
|
||||
}
|
||||
|
||||
#include "svnrev.h"
|
||||
|
||||
Pcsx2Config EmuConfig;
|
||||
|
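AllowModification/ForbidModification above implement the usual W^X dance for JIT buffers: writable while the recompiler emits, then flipped for execution (pcsx2's AllowModification actually grants RWX via PageAccess_Any; the sketch below shows the stricter RW/RX variant). A minimal POSIX sketch of the same flow, assuming an x86 host for the emitted `ret` byte:

#include <sys/mman.h>
#include <cstdint>
#include <cstring>

int main()
{
	const size_t size = 4096;
	void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
	                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	// "AllowModification": buffer is writable; emit code into it.
	static const uint8_t ret_x86[] = {0xC3}; // x86 `ret`
	std::memcpy(buf, ret_x86, sizeof(ret_x86));

	// "ForbidModification": flip to read+execute before running it.
	if (mprotect(buf, size, PROT_READ | PROT_EXEC) != 0)
		return 1;

	reinterpret_cast<void (*)()>(buf)();
	munmap(buf, size);
	return 0;
}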
@@ -315,23 +307,24 @@ namespace HostMemoryMap {
 }

 /// Attempts to find a spot near static variables for the main memory
-static VirtualMemoryManagerPtr makeMainMemoryManager() {
+static VirtualMemoryManagerPtr makeMemoryManager(const char* name, const char* file_mapping_name, size_t size, size_t offset_from_base)
+{
 	// Everything looks nicer when the start of all the sections is a nice round looking number.
 	// Also reduces the variation in the address due to small changes in code.
 	// Breaks ASLR but so does anything else that tries to make addresses constant for our debugging pleasure
-	uptr codeBase = (uptr)(void*)makeMainMemoryManager / (1 << 28) * (1 << 28);
+	uptr codeBase = (uptr)(void*)makeMemoryManager / (1 << 28) * (1 << 28);

 	// The allocation is ~640mb in size, slighly under 3*2^28.
 	// We'll hope that the code generated for the PCSX2 executable stays under 512mb (which is likely)
 	// On x86-64, code can reach 8*2^28 from its address [-6*2^28, 4*2^28] is the region that allows for code in the 640mb allocation to reach 512mb of code that either starts at codeBase or 256mb before it.
 	// We start high and count down because on macOS code starts at the beginning of useable address space, so starting as far ahead as possible reduces address variations due to code size.  Not sure about other platforms.  Obviously this only actually affects what shows up in a debugger and won't affect performance or correctness of anything.
 	for (int offset = 4; offset >= -6; offset--) {
-		uptr base = codeBase + (offset << 28);
-		if ((sptr)base < 0 || (sptr)(base + HostMemoryMap::Size - 1) < 0) {
+		uptr base = codeBase + (offset << 28) + offset_from_base;
+		if ((sptr)base < 0 || (sptr)(base + size - 1) < 0) {
 			// VTLB will throw a fit if we try to put EE main memory here
 			continue;
 		}
-		auto mgr = std::make_shared<VirtualMemoryManager>("Main Memory Manager", base, HostMemoryMap::Size, /*upper_bounds=*/0, /*strict=*/true);
+		auto mgr = std::make_shared<VirtualMemoryManager>(name, file_mapping_name, base, size, /*upper_bounds=*/0, /*strict=*/true);
 		if (mgr->IsOk()) {
 			return mgr;
 		}

@@ -340,69 +333,54 @@ static VirtualMemoryManagerPtr makeMainMemoryManager() {
 	// If the above failed and it's x86-64, recompiled code is going to break!
 	// If it's i386 anything can reach anything so it doesn't matter
 	if (sizeof(void*) == 8) {
-		pxAssertRel(0, "Failed to find a good place for the main memory allocation, recompilers may fail");
+		pxAssertRel(0, "Failed to find a good place for the memory allocation, recompilers may fail");
 	}
-	return std::make_shared<VirtualMemoryManager>("Main Memory Manager", 0, HostMemoryMap::Size);
+	return std::make_shared<VirtualMemoryManager>(name, file_mapping_name, 0, size);
 }

 // --------------------------------------------------------------------------------------
 //  SysReserveVM  (implementations)
 // --------------------------------------------------------------------------------------
 SysMainMemory::SysMainMemory()
-	: m_mainMemory(makeMainMemoryManager())
-	, m_bumpAllocator(m_mainMemory, HostMemoryMap::bumpAllocatorOffset, HostMemoryMap::Size - HostMemoryMap::bumpAllocatorOffset)
+	: m_mainMemory(makeMemoryManager("Main Memory Manager", "pcsx2", HostMemoryMap::MainSize, 0))
+	, m_codeMemory(makeMemoryManager("Code Memory Manager", nullptr, HostMemoryMap::CodeSize, HostMemoryMap::MainSize))
+	, m_bumpAllocator(m_mainMemory, HostMemoryMap::bumpAllocatorOffset, HostMemoryMap::MainSize - HostMemoryMap::bumpAllocatorOffset)
 {
-	uptr base = (uptr)MainMemory()->GetBase();
-	HostMemoryMap::EEmem = base + HostMemoryMap::EEmemOffset;
-	HostMemoryMap::IOPmem = base + HostMemoryMap::IOPmemOffset;
-	HostMemoryMap::VUmem = base + HostMemoryMap::VUmemOffset;
-	HostMemoryMap::EErec = base + HostMemoryMap::EErecOffset;
-	HostMemoryMap::IOPrec = base + HostMemoryMap::IOPrecOffset;
-	HostMemoryMap::VIF0rec = base + HostMemoryMap::VIF0recOffset;
-	HostMemoryMap::VIF1rec = base + HostMemoryMap::VIF1recOffset;
-	HostMemoryMap::mVU0rec = base + HostMemoryMap::mVU0recOffset;
-	HostMemoryMap::mVU1rec = base + HostMemoryMap::mVU1recOffset;
-	HostMemoryMap::bumpAllocator = base + HostMemoryMap::bumpAllocatorOffset;
+	uptr main_base = (uptr)MainMemory()->GetBase();
+	uptr code_base = (uptr)MainMemory()->GetBase();
+	HostMemoryMap::EEmem = main_base + HostMemoryMap::EEmemOffset;
+	HostMemoryMap::IOPmem = main_base + HostMemoryMap::IOPmemOffset;
+	HostMemoryMap::VUmem = main_base + HostMemoryMap::VUmemOffset;
+	HostMemoryMap::EErec = code_base + HostMemoryMap::EErecOffset;
+	HostMemoryMap::IOPrec = code_base + HostMemoryMap::IOPrecOffset;
+	HostMemoryMap::VIF0rec = code_base + HostMemoryMap::VIF0recOffset;
+	HostMemoryMap::VIF1rec = code_base + HostMemoryMap::VIF1recOffset;
+	HostMemoryMap::mVU0rec = code_base + HostMemoryMap::mVU0recOffset;
+	HostMemoryMap::mVU1rec = code_base + HostMemoryMap::mVU1recOffset;
+	HostMemoryMap::bumpAllocator = main_base + HostMemoryMap::bumpAllocatorOffset;
 }

 SysMainMemory::~SysMainMemory()
 {
-	try {
-		ReleaseAll();
-	}
-	DESTRUCTOR_CATCHALL
+	Release();
 }

-void SysMainMemory::ReserveAll()
+bool SysMainMemory::Allocate()
 {
+	DevCon.WriteLn(Color_StrongBlue, "Allocating host memory for virtual systems...");
 	pxInstallSignalHandler();
-
-	DevCon.WriteLn( Color_StrongBlue, "Mapping host memory for virtual systems..." );
 	ConsoleIndentScope indent(1);

-	m_ee.Reserve(MainMemory());
-	m_iop.Reserve(MainMemory());
-	m_vu.Reserve(MainMemory());
-}
-
-void SysMainMemory::CommitAll()
-{
+	m_ee.Assign(MainMemory());
+	m_iop.Assign(MainMemory());
+	m_vu.Assign(MainMemory());
 	vtlb_Core_Alloc();
-	if (m_ee.IsCommitted() && m_iop.IsCommitted() && m_vu.IsCommitted()) return;

-	DevCon.WriteLn( Color_StrongBlue, "Allocating host memory for virtual systems..." );
-	ConsoleIndentScope indent(1);
-
-	m_ee.Commit();
-	m_iop.Commit();
-	m_vu.Commit();
+	return true;
 }

-void SysMainMemory::ResetAll()
+void SysMainMemory::Reset()
 {
-	CommitAll();
-
 	DevCon.WriteLn( Color_StrongBlue, "Resetting host memory for virtual systems..." );
 	ConsoleIndentScope indent(1);

@@ -413,49 +391,21 @@ void SysMainMemory::ResetAll()
 	// Note: newVif is reset as part of other VIF structures.
 }

-void SysMainMemory::DecommitAll()
+void SysMainMemory::Release()
 {
-	if (!m_ee.IsCommitted() && !m_iop.IsCommitted() && !m_vu.IsCommitted()) return;
-
-	Console.WriteLn( Color_Blue, "Decommitting host memory for virtual systems..." );
+	Console.WriteLn( Color_Blue, "Releasing host memory for virtual systems..." );
 	ConsoleIndentScope indent(1);

 	// On linux, the MTVU isn't empty and the thread still uses the m_ee/m_vu memory
 	vu1Thread.WaitVU();
 	// The EE thread must be stopped here command mustn't be send
 	// to the ring. Let's call it an extra safety valve :)
 	vu1Thread.Reset();

 	hwShutdown();

-	m_ee.Decommit();
-	m_iop.Decommit();
-	m_vu.Decommit();
-
 	closeNewVif(0);
 	closeNewVif(1);

 	g_GameStarted = false;
 	g_GameLoading = false;

 	vtlb_Core_Free();
-}
-
-void SysMainMemory::ReleaseAll()
-{
-	DecommitAll();
-
-	Console.WriteLn( Color_Blue, "Releasing host memory maps for virtual systems..." );
-	ConsoleIndentScope indent(1);
-
-	vtlb_Core_Free(); // Just to be sure... (calling order could result in it getting missed during Decommit).
-
 	releaseNewVif(0);
 	releaseNewVif(1);

-	m_ee.Decommit();
-	m_iop.Decommit();
-	m_vu.Decommit();
+	m_ee.Release();
+	m_iop.Release();
+	m_vu.Release();

 	safe_delete(Source_PageFault);
 }

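The base-address dance in makeMemoryManager is compact but easy to misread: round the function's own address down to a 256MiB boundary, then probe candidate slots at multiples of 1<<28 around it so rel32 branches in recompiled code can still reach the executable. A standalone sketch of that arithmetic (illustrative; a real implementation would attempt a strict reservation per candidate and keep the first that succeeds):

#include <cstdint>
#include <cstdio>

static void probe_anchor() {} // any code address works as the anchor

int main()
{
	const uintptr_t slot = uintptr_t(1) << 28; // 256MiB
	const uintptr_t code_addr = reinterpret_cast<uintptr_t>(&probe_anchor);
	const uintptr_t code_base = code_addr / slot * slot; // round down

	for (int offset = 4; offset >= -6; offset--)
	{
		// Unsigned wraparound reproduces the signed offsetting of the original.
		const uintptr_t candidate =
			code_base + static_cast<uintptr_t>(static_cast<intptr_t>(offset)) * slot;
		std::printf("candidate base: 0x%zx\n", static_cast<size_t>(candidate));
	}
	return 0;
}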
@@ -492,6 +442,7 @@ SysCpuProviderPack::SysCpuProviderPack()
 	}

 	// hmm! : VU0 and VU1 pre-allocations should do sVU and mVU separately?  Sounds complicated. :(
+	// TODO(Stenzek): error handling in this whole function...

 	if (newVifDynaRec)
 	{

@@ -588,37 +539,6 @@ void SysClearExecutionCache()
 	}
 }

-// Maps a block of memory for use as a recompiled code buffer, and ensures that the
-// allocation is below a certain memory address (specified in "bounds" parameter).
-// The allocated block has code execution privileges.
-// Returns NULL on allocation failure.
-u8* SysMmapEx(uptr base, u32 size, uptr bounds, const char *caller)
-{
-	u8* Mem = (u8*)HostSys::Mmap( base, size );
-
-	if( (Mem == NULL) || (bounds != 0 && (((uptr)Mem + size) > bounds)) )
-	{
-		if( base )
-		{
-			DbgCon.Warning( "First try failed allocating %s at address 0x%x", caller, base );
-
-			// Let's try again at an OS-picked memory area, and then hope it meets needed
-			// boundschecking criteria below.
-			SafeSysMunmap( Mem, size );
-			Mem = (u8*)HostSys::Mmap( 0, size );
-		}
-
-		if( (bounds != 0) && (((uptr)Mem + size) > bounds) )
-		{
-			DevCon.Warning( "Second try failed allocating %s, block ptr 0x%x does not meet required criteria.", caller, Mem );
-			SafeSysMunmap( Mem, size );
-
-			// returns NULL, caller should throw an exception.
-		}
-	}
-	return Mem;
-}
-
 std::string SysGetBiosDiscID()
 {
 	// FIXME: we should return a serial based on

@@ -43,12 +43,12 @@ class RecompiledCodeReserve;

 namespace HostMemoryMap
 {
-	static const u32 Size = 0x28000000;
+	//////////////////////////////////////////////////////////////////////////
+	// Main
+	//////////////////////////////////////////////////////////////////////////
+	static const u32 MainSize = 0x14000000;

 	// The actual addresses may not be equivalent to Base + Offset in the event that allocation at Base failed
 	// Each of these offsets has a debugger-accessible equivalent variable without the Offset suffix that will hold the actual address (not here because we don't want code using it)

-	// PS2 main memory, SPR, and ROMs
+	// PS2 main memory, SPR, and ROMs (approximately 40.5MB, but we round up to 64MB for simplicity).
 	static const u32 EEmemOffset = 0x00000000;

 	// IOP main memory and ROMs

@@ -57,54 +57,65 @@ namespace HostMemoryMap
 	// VU0 and VU1 memory.
 	static const u32 VUmemOffset = 0x08000000;

-	// EE recompiler code cache area (64mb)
-	static const u32 EErecOffset = 0x10000000;
-
-	// IOP recompiler code cache area (16 or 32mb)
-	static const u32 IOPrecOffset = 0x14000000;
-
-	// newVif0 recompiler code cache area (16mb)
-	static const u32 VIF0recOffset = 0x16000000;
-
-	// newVif1 recompiler code cache area (32mb)
-	static const u32 VIF1recOffset = 0x18000000;
-
-	// microVU1 recompiler code cache area (32 or 64mb)
-	static const u32 mVU0recOffset = 0x1C000000;
-
-	// microVU0 recompiler code cache area (64mb)
-	static const u32 mVU1recOffset = 0x20000000;
-
 	// Bump allocator for any other small allocations
 	// size: Difference between it and HostMemoryMap::Size, so nothing should allocate higher than it!
-	static const u32 bumpAllocatorOffset = 0x24000000;
+	static const u32 bumpAllocatorOffset = 0x10000000;
+
+	//////////////////////////////////////////////////////////////////////////
+	// Code
+	//////////////////////////////////////////////////////////////////////////
+	static const u32 CodeSize = 0x0F100000; // 241 mb
+
+	// EE recompiler code cache area (64mb)
+	static const u32 EErecOffset = 0x00000000;
+
+	// IOP recompiler code cache area (32mb)
+	static const u32 IOPrecOffset = 0x04000000;
+
+	// newVif0 recompiler code cache area (8mb)
+	static const u32 VIF0recOffset = 0x06000000;
+
+	// newVif1 recompiler code cache area (8mb)
+	static const u32 VIF1recOffset = 0x06800000;
+
+	// microVU1 recompiler code cache area (64mb)
+	static const u32 mVU0recOffset = 0x07000000;
+
+	// microVU0 recompiler code cache area (64mb)
+	static const u32 mVU1recOffset = 0x0B000000;
+
+	// SSE-optimized VIF unpack functions (1mb)
+	static const u32 VIFUnpackRecOffset = 0x0F000000;
 }

 // --------------------------------------------------------------------------------------
 //  SysMainMemory
 // --------------------------------------------------------------------------------------
 // This class provides the main memory for the virtual machines.
-class SysMainMemory
+class SysMainMemory final
 {
 protected:
 	const VirtualMemoryManagerPtr m_mainMemory;
-	VirtualMemoryBumpAllocator m_bumpAllocator;
-	eeMemoryReserve m_ee;
-	iopMemoryReserve m_iop;
-	vuMemoryReserve m_vu;
+	const VirtualMemoryManagerPtr m_codeMemory;
+
+	VirtualMemoryBumpAllocator m_bumpAllocator;
+
+	eeMemoryReserve m_ee;
+	iopMemoryReserve m_iop;
+	vuMemoryReserve m_vu;

 public:
 	SysMainMemory();
-	virtual ~SysMainMemory();
+	~SysMainMemory();

-	const VirtualMemoryManagerPtr& MainMemory() { return m_mainMemory; }
-	VirtualMemoryBumpAllocator& BumpAllocator() { return m_bumpAllocator; }
+	const VirtualMemoryManagerPtr& MainMemory() { return m_mainMemory; }
+	const VirtualMemoryManagerPtr& CodeMemory() { return m_codeMemory; }

-	virtual void ReserveAll();
-	virtual void CommitAll();
-	virtual void ResetAll();
-	virtual void DecommitAll();
-	virtual void ReleaseAll();
+	VirtualMemoryBumpAllocator& BumpAllocator() { return m_bumpAllocator; }
+
+	bool Allocate();
+	void Reset();
+	void Release();
 };

 // --------------------------------------------------------------------------------------

@@ -161,8 +172,6 @@ extern SysCpuProviderPack& GetCpuProviders();
 extern void SysLogMachineCaps();				// Detects cpu type and fills cpuInfo structs.
 extern void SysClearExecutionCache();	// clears recompiled execution caches!

-extern u8 *SysMmapEx(uptr base, u32 size, uptr bounds, const char *caller="Unnamed");
-
 extern std::string SysGetBiosDiscID();
 extern std::string SysGetDiscID();

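The new Code section forms a fixed partition of the 241MB code manager, so the offsets and the sizes in the comments must tile without overlap. A sketch of compile-time checks one could keep alongside the header (constants copied from the offsets above, region sizes from their comments):

#include <cstdint>

constexpr uint32_t MainSize            = 0x14000000;
constexpr uint32_t bumpAllocatorOffset = 0x10000000;
constexpr uint32_t CodeSize            = 0x0F100000;
constexpr uint32_t EErecOffset         = 0x00000000; // 64mb
constexpr uint32_t IOPrecOffset        = 0x04000000; // 32mb
constexpr uint32_t VIF0recOffset       = 0x06000000; // 8mb
constexpr uint32_t VIF1recOffset       = 0x06800000; // 8mb
constexpr uint32_t mVU0recOffset       = 0x07000000; // 64mb
constexpr uint32_t mVU1recOffset       = 0x0B000000; // 64mb
constexpr uint32_t VIFUnpackRecOffset  = 0x0F000000; // 1mb

static_assert(bumpAllocatorOffset < MainSize);
static_assert(EErecOffset   + 0x04000000 <= IOPrecOffset);
static_assert(IOPrecOffset  + 0x02000000 <= VIF0recOffset);
static_assert(VIF0recOffset + 0x00800000 <= VIF1recOffset);
static_assert(VIF1recOffset + 0x00800000 <= mVU0recOffset);
static_assert(mVU0recOffset + 0x04000000 <= mVU1recOffset);
static_assert(mVU1recOffset + 0x04000000 <= VIFUnpackRecOffset);
static_assert(VIFUnpackRecOffset + 0x00100000 <= CodeSize);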
@@ -28,29 +28,23 @@ class RecompiledCodeReserve : public VirtualMemoryReserve
 	typedef VirtualMemoryReserve _parent;

 protected:
 	std::string m_profiler_name;

 public:
-	RecompiledCodeReserve( std::string name, uint defCommit = 0 );
-	virtual ~RecompiledCodeReserve();
+	RecompiledCodeReserve(std::string name);
+	~RecompiledCodeReserve();

-	virtual void* Assign( VirtualMemoryManagerPtr allocator, void *baseptr, size_t size ) override;
-	virtual void Reset() override;
-	virtual bool Commit() override;
+	void Assign(VirtualMemoryManagerPtr allocator, size_t offset, size_t size);
+	void Reset();

-	virtual RecompiledCodeReserve& SetProfilerName( std::string shortname );
+	RecompiledCodeReserve& SetProfilerName(std::string name);

 	void ThrowIfNotOk() const;
+	void ForbidModification();
+	void AllowModification();

 	operator void*() { return m_baseptr; }
 	operator const void*() const { return m_baseptr; }

-	operator u8*() { return (u8*)m_baseptr; }
-	operator const u8*() const { return (u8*)m_baseptr; }
+	operator u8*() { return m_baseptr; }
+	operator const u8*() const { return m_baseptr; }

 protected:
 	void ResetProcessReserves() const;

 	void _registerProfiler();
-	void _termProfiler();
 };

@@ -279,8 +279,7 @@ bool VMManager::Internal::InitializeMemory()
 	s_vm_memory = std::make_unique<SysMainMemory>();
 	s_cpu_provider_pack = std::make_unique<SysCpuProviderPack>();

-	s_vm_memory->ReserveAll();
-	return true;
+	return s_vm_memory->Allocate();
 }

 void VMManager::Internal::ReleaseMemory()

@@ -290,8 +289,6 @@ void VMManager::Internal::ReleaseMemory()
 	std::vector<u8>().swap(s_no_interlacing_cheats_data);
 	s_no_interlacing_cheats_loaded = false;

-	s_vm_memory->DecommitAll();
-	s_vm_memory->ReleaseAll();
 	s_vm_memory.reset();
 	s_cpu_provider_pack.reset();
 }

@@ -882,9 +879,6 @@ bool VMManager::Initialize(VMBootParameters boot_params)
 	if (!GSDumpReplayer::IsReplayingDump() && !CheckBIOSAvailability())
 		return false;

-	Console.WriteLn("Allocating memory map...");
-	s_vm_memory->CommitAll();
-
 	Console.WriteLn("Opening CDVD...");
 	if (!DoCDVDopen())
 	{

@@ -1067,6 +1061,7 @@ void VMManager::Shutdown(bool save_resume_state)

 	ForgetLoadedPatches();
 	R3000A::ioman::reset();
+	vtlb_Shutdown();
 	USBclose();
 	SPU2close();
 	PADclose();

@@ -1093,8 +1088,6 @@ void VMManager::Shutdown(bool save_resume_state)
 	DEV9shutdown();
 	GSshutdown();

-	s_vm_memory->DecommitAll();
-
 	s_state.store(VMState::Shutdown, std::memory_order_release);
 	Host::OnVMDestroyed();
 }

@@ -21,27 +21,36 @@

 alignas(16) VURegs vuRegs[2];

 vuMemoryReserve::vuMemoryReserve()
-	: _parent( "VU0/1 on-chip memory", VU1_PROGSIZE + VU1_MEMSIZE + VU0_PROGSIZE + VU0_MEMSIZE )
+	: _parent("VU0/1 on-chip memory")
 {
 }

-void vuMemoryReserve::Reserve(VirtualMemoryManagerPtr allocator)
+vuMemoryReserve::~vuMemoryReserve()
 {
-	_parent::Reserve(std::move(allocator), HostMemoryMap::VUmemOffset);
-	//_parent::Reserve(EmuConfig.HostMemMap.VUmem);
+	Release();
 }

-	u8* curpos = m_reserve.GetPtr();
+void vuMemoryReserve::Assign(VirtualMemoryManagerPtr allocator)
+{
+	static constexpr u32 VU_MEMORY_RESERVE_SIZE = VU1_PROGSIZE + VU1_MEMSIZE + VU0_PROGSIZE + VU0_MEMSIZE;
+
+	_parent::Assign(std::move(allocator), HostMemoryMap::VUmemOffset, VU_MEMORY_RESERVE_SIZE);
+
+	u8* curpos = GetPtr();
 	VU0.Micro	= curpos; curpos += VU0_PROGSIZE;
 	VU0.Mem		= curpos; curpos += VU0_MEMSIZE;
 	VU1.Micro	= curpos; curpos += VU1_PROGSIZE;
 	VU1.Mem		= curpos; curpos += VU1_MEMSIZE;
 }

-vuMemoryReserve::~vuMemoryReserve()
+void vuMemoryReserve::Release()
 {
-	VU0.Micro = VU0.Mem = NULL;
-	VU1.Micro = VU1.Mem = NULL;
+	_parent::Release();
+
+	VU0.Micro = VU0.Mem = nullptr;
+	VU1.Micro = VU1.Mem = nullptr;
 }

 void vuMemoryReserve::Reset()

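Assign() carves one contiguous reserve into the four VU regions with a moving cursor. A self-contained sketch of the pattern, with sizes matching the PS2's VU memories (4KB micro/4KB data on VU0, 16KB each on VU1):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct VuLayout
{
	uint8_t* vu0_micro;
	uint8_t* vu0_mem;
	uint8_t* vu1_micro;
	uint8_t* vu1_mem;
};

VuLayout carve(uint8_t* base)
{
	constexpr size_t kVU0Prog = 0x1000, kVU0Mem = 0x1000;
	constexpr size_t kVU1Prog = 0x4000, kVU1Mem = 0x4000;

	VuLayout l{};
	uint8_t* cur = base;
	l.vu0_micro = cur; cur += kVU0Prog; // cursor advances past each region
	l.vu0_mem   = cur; cur += kVU0Mem;
	l.vu1_micro = cur; cur += kVU1Prog;
	l.vu1_mem   = cur; cur += kVU1Mem;
	assert(cur - base == kVU0Prog + kVU0Mem + kVU1Prog + kVU1Mem);
	return l;
}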
@@ -162,7 +162,8 @@ void Pcsx2App::AllocateCoreStuffs()
 	SysLogMachineCaps();
 	AppApplySettings();

-	GetVmReserve().ReserveAll();
+	if (!GetVmReserve().Allocate())
+		pxFailRel("Failed to allocate memory.");

 	if (!m_CpuProviders)
 	{

@@ -160,7 +160,6 @@ void SysCoreThread::ResetQuick()
 void SysCoreThread::Reset()
 {
 	ResetQuick();
-	GetVmMemory().DecommitAll();
 	SysClearExecutionCache();
 	sApp.PostAppMethod(&Pcsx2App::leaveDebugMode);
 	g_FrameCount = 0;

@@ -216,8 +215,6 @@ void SysCoreThread::_reset_stuff_as_needed()
 	// because of changes to the TLB.  We don't actually support the TLB, however, so rec
 	// resets aren't in fact *needed* ... yet.  But might as well, no harm.  --air

-	GetVmMemory().CommitAll();
-
 	if (m_resetVirtualMachine || m_resetRecompilers || m_resetProfilers)
 	{
 		SysClearExecutionCache();

@@ -16,13 +16,14 @@
 #pragma once

 #include "Hw.h"
+#include "SingleRegisterTypes.h"

 // hw read functions
 template< uint page > extern mem8_t hwRead8 (u32 mem);
 template< uint page > extern mem16_t hwRead16 (u32 mem);
 template< uint page > extern mem32_t hwRead32 (u32 mem);
-template< uint page > extern RETURNS_R64 hwRead64 (u32 mem);
+template< uint page > extern mem64_t hwRead64 (u32 mem);
 template< uint page > extern RETURNS_R128 hwRead128(u32 mem);

 // Internal hwRead32 which does not log reads, used by hwWrite8/16 to perform

@@ -39,8 +40,8 @@ template<uint page> extern void hwWrite8 (u32 mem, u8 value);
 template<uint page> extern void hwWrite16 (u32 mem, u16 value);

 template<uint page> extern void hwWrite32 (u32 mem, mem32_t value);
-template<uint page> extern void hwWrite64 (u32 mem, const mem64_t* srcval);
-template<uint page> extern void hwWrite128(u32 mem, const mem128_t* srcval);
+template<uint page> extern void hwWrite64 (u32 mem, mem64_t srcval);
+template<uint page> extern void TAKES_R128 hwWrite128(u32 mem, r128 srcval);

 // --------------------------------------------------------------------------------------
 //  Hardware FIFOs (128 bit access only!)

@@ -288,11 +288,11 @@ static __ri void eeHwTraceLog( u32 addr, T val, bool mode )
 	}
 	else if constexpr (sizeof(T) == 8)
 	{
-		valStr = fmt::format("0x{:08x}.{:08x}", ((u32*)&val)[1], ((u32*)&val)[0]);
+		valStr = fmt::format("0x{:08x}.{:08x}", static_cast<u32>(val >> 32), static_cast<u32>(val));
 	}
 	else if constexpr (sizeof(T) == 16)
 	{
-		valStr = StringUtil::U128ToString((u128&)val);
+		valStr = StringUtil::U128ToString(r128_to_u128(val));
 	}
 
 	static const char* temp = "%-12s @ 0x%08X/%-16s %s %s";

@@ -301,4 +301,4 @@ static __ri void eeHwTraceLog( u32 addr, T val, bool mode )
 		HW_LOG( temp, labelStr.c_str(), addr, regname, mode ? "->" : "<-", valStr.c_str() );
 	else
 		UnknownHW_LOG( temp, labelStr.c_str(), addr, "Unknown", mode ? "->" : "<-", valStr.c_str() );
 }
 }
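The trace-log change reads the two halves by shifting the value rather than type-punning through a pointer, which avoids undefined behaviour and works when val lives in a register. A small sketch, assuming only the fmt library the file already includes:

#include <cstdint>
#include <string>
#include <fmt/core.h>

int main()
{
    const uint64_t val = 0x0123456789abcdefULL;
    // Split into high/low 32-bit halves by shifting; no pointer casts, so the
    // value never needs a memory address (and no strict-aliasing violation).
    const std::string s = fmt::format("0x{:08x}.{:08x}",
                                      static_cast<uint32_t>(val >> 32),
                                      static_cast<uint32_t>(val));
    fmt::print("{}\n", s); // prints 0x01234567.89abcdef
}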
161	pcsx2/vtlb.cpp

@@ -35,7 +35,9 @@
 #include "COP0.h"
 #include "Cache.h"
 #include "R5900Exceptions.h"
 #include "IopMem.h"
 
+#include "common/Align.h"
 #include "common/MemsetFast.inl"
 
+#include "fmt/core.h"
@@ -137,6 +139,9 @@ DataType vtlb_memRead(u32 addr)
 				case 32:
 					return readCache32(addr);
 					break;
+				case 64:
+					return readCache64(addr);
+					break;
 
 				jNO_DEFAULT;
 			}

@@ -159,6 +164,8 @@ DataType vtlb_memRead(u32 addr)
 			return vmv.assumeHandler<16, false>()(paddr);
 		case 32:
 			return vmv.assumeHandler<32, false>()(paddr);
+		case 64:
+			return vmv.assumeHandler<64, false>()(paddr);
 
 		jNO_DEFAULT;
 	}

@@ -166,30 +173,6 @@ DataType vtlb_memRead(u32 addr)
 	return 0; // technically unreachable, but suppresses warnings.
 }
 
-RETURNS_R64 vtlb_memRead64(u32 mem)
-{
-	auto vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
-
-	if (!vmv.isHandler(mem))
-	{
-		if (!CHECK_EEREC) {
-			if(CHECK_CACHE && CheckCache(mem))
-			{
-				return readCache64(mem);
-			}
-		}
-
-		return r64_load(reinterpret_cast<const void*>(vmv.assumePtr(mem)));
-	}
-	else
-	{
-		//has to: translate, find function, call function
-		u32 paddr = vmv.assumeHandlerGetPAddr(mem);
-		//Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
-		return vmv.assumeHandler<64, false>()(paddr);
-	}
-}
-
 RETURNS_R128 vtlb_memRead128(u32 mem)
 {
 	auto vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
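The dedicated 64-bit reader can go away because the width-templated vtlb_memRead<T> now instantiates for mem64_t as well. A minimal sketch of that shape, with stand-in types and only the direct path (the real function also dispatches to per-width handler tables):

#include <cstdint>
#include <cstdio>
#include <cstring>

alignas(8) static uint8_t ram[16] = {0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01};

template <typename DataType>
DataType memRead(uint32_t addr)
{
    // Direct path: in PCSX2 this pointer comes from vmv.assumePtr(mem).
    DataType v;
    std::memcpy(&v, &ram[addr], sizeof(v));
    return v;
}

// Explicit instantiations, mirroring the new vtlb_memRead<mem64_t>.
template uint32_t memRead<uint32_t>(uint32_t);
template uint64_t memRead<uint64_t>(uint32_t);

int main()
{
    std::printf("0x%016llx\n", (unsigned long long)memRead<uint64_t>(0));
}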
@@ -239,6 +222,9 @@ void vtlb_memWrite(u32 addr, DataType data)
 				case 32:
 					writeCache32(addr, data);
 					return;
+				case 64:
+					writeCache64(addr, data);
+					return;
 			}
 		}
 	}

@@ -254,7 +240,7 @@ void vtlb_memWrite(u32 addr, DataType data)
 	}
 }
 
-void vtlb_memWrite64(u32 mem, const mem64_t* value)
+void TAKES_R128 vtlb_memWrite128(u32 mem, r128 value)
 {
 	auto vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
 

@@ -264,39 +250,13 @@ void vtlb_memWrite64(u32 mem, const mem64_t* value)
 		{
 			if(CHECK_CACHE && CheckCache(mem))
 			{
-				writeCache64(mem, *value);
+				alignas(16) const u128 r = r128_to_u128(value);
+				writeCache128(mem, &r);
 				return;
 			}
 		}
 
-		*(mem64_t*)vmv.assumePtr(mem) = *value;
-	}
-	else
-	{
-		//has to: translate, find function, call function
-		u32 paddr = vmv.assumeHandlerGetPAddr(mem);
-		//Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
-
-		vmv.assumeHandler<64, true>()(paddr, value);
-	}
-}
-
-void vtlb_memWrite128(u32 mem, const mem128_t *value)
-{
-	auto vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
-
-	if (!vmv.isHandler(mem))
-	{
-		if (!CHECK_EEREC)
-		{
-			if(CHECK_CACHE && CheckCache(mem))
-			{
-				writeCache128(mem, value);
-				return;
-			}
-		}
-
-		CopyQWC((void*)vmv.assumePtr(mem), value);
+		r128_store_unaligned((void*)vmv.assumePtr(mem), value);
 	}
 	else
 	{
@@ -311,9 +271,11 @@ void vtlb_memWrite128(u32 mem, const mem128_t *value)
 template mem8_t vtlb_memRead<mem8_t>(u32 mem);
 template mem16_t vtlb_memRead<mem16_t>(u32 mem);
 template mem32_t vtlb_memRead<mem32_t>(u32 mem);
+template mem64_t vtlb_memRead<mem64_t>(u32 mem);
 template void vtlb_memWrite<mem8_t>(u32 mem, mem8_t data);
 template void vtlb_memWrite<mem16_t>(u32 mem, mem16_t data);
 template void vtlb_memWrite<mem32_t>(u32 mem, mem32_t data);
+template void vtlb_memWrite<mem64_t>(u32 mem, mem64_t data);
 
 template <typename DataType>
 bool vtlb_ramRead(u32 addr, DataType* value)

@@ -482,7 +444,7 @@ template<typename OperandType, u32 saddr>
 void vtlbUnmappedVWriteSm(u32 addr,OperandType data) { vtlb_Miss(addr|saddr,1); }
 
 template<typename OperandType, u32 saddr>
-void vtlbUnmappedVWriteLg(u32 addr,const OperandType* data) { vtlb_Miss(addr|saddr,1); }
+void __vectorcall vtlbUnmappedVWriteLg(u32 addr,u_to_r<OperandType> data) { vtlb_Miss(addr|saddr,1); }
 
 template<typename OperandType, u32 saddr>
 OperandType vtlbUnmappedPReadSm(u32 addr) { vtlb_BusError(addr|saddr,0); return 0; }

@@ -494,7 +456,7 @@ template<typename OperandType, u32 saddr>
 void vtlbUnmappedPWriteSm(u32 addr,OperandType data) { vtlb_BusError(addr|saddr,1); }
 
 template<typename OperandType, u32 saddr>
-void vtlbUnmappedPWriteLg(u32 addr,const OperandType* data) { vtlb_BusError(addr|saddr,1); }
+void __vectorcall vtlbUnmappedPWriteLg(u32 addr,u_to_r<OperandType> data) { vtlb_BusError(addr|saddr,1); }
 
 // --------------------------------------------------------------------------------------
 // VTLB mapping errors

@@ -521,13 +483,13 @@ static mem32_t vtlbDefaultPhyRead32(u32 addr)
 	return 0;
 }
 
-static __m128i __vectorcall vtlbDefaultPhyRead64(u32 addr)
+static mem64_t vtlbDefaultPhyRead64(u32 addr)
 {
 	pxFailDev(fmt::format("(VTLB) Attempted read64 from unmapped physical address @ 0x{:08X}.", addr).c_str());
-	return r64_zero();
+	return 0;
 }
 
-static __m128i __vectorcall vtlbDefaultPhyRead128(u32 addr)
+static RETURNS_R128 vtlbDefaultPhyRead128(u32 addr)
 {
 	pxFailDev(fmt::format("(VTLB) Attempted read128 from unmapped physical address @ 0x{:08X}.", addr).c_str());
 	return r128_zero();

@@ -548,12 +510,12 @@ static void vtlbDefaultPhyWrite32(u32 addr, mem32_t data)
 	pxFailDev(fmt::format("(VTLB) Attempted write32 to unmapped physical address @ 0x{:08X}.", addr).c_str());
 }
 
-static void vtlbDefaultPhyWrite64(u32 addr,const mem64_t* data)
+static void vtlbDefaultPhyWrite64(u32 addr,mem64_t data)
 {
 	pxFailDev(fmt::format("(VTLB) Attempted write64 to unmapped physical address @ 0x{:08X}.", addr).c_str());
 }
 
-static void vtlbDefaultPhyWrite128(u32 addr,const mem128_t* data)
+static void TAKES_R128 vtlbDefaultPhyWrite128(u32 addr,r128 data)
 {
 	pxFailDev(fmt::format("(VTLB) Attempted write128 to unmapped physical address @ 0x{:08X}.", addr).c_str());
 }
@@ -773,9 +735,9 @@ void vtlb_Init()
 
 #define VTLB_BuildUnmappedHandler(baseName, highBit) \
 	baseName##ReadSm<mem8_t,0>,   baseName##ReadSm<mem16_t,0>, baseName##ReadSm<mem32_t,0>, \
-	baseName##ReadLg<mem64_t,0>,  baseName##ReadLg<mem128_t,0>, \
+	baseName##ReadSm<mem64_t,0>,  baseName##ReadLg<mem128_t,0>, \
 	baseName##WriteSm<mem8_t,0>,  baseName##WriteSm<mem16_t,0>, baseName##WriteSm<mem32_t,0>, \
-	baseName##WriteLg<mem64_t,0>, baseName##WriteLg<mem128_t,0>
+	baseName##WriteSm<mem64_t,0>, baseName##WriteLg<mem128_t,0>
 
 //Register default handlers
 //Unmapped Virt handlers _MUST_ be registered first.

@@ -816,9 +778,8 @@ void vtlb_Reset()
 	for(int i=0; i<48; i++) UnmapTLB(i);
 }
 
-void vtlb_Term()
+void vtlb_Shutdown()
 {
 	//nothing to do for now
 }
 
 static constexpr size_t VMAP_SIZE = sizeof(VTLBVirtual) * VTLB_VMAP_ITEMS;
@@ -832,17 +793,16 @@ void vtlb_Core_Alloc()
 	// Can't return regions to the bump allocator
 	static VTLBVirtual* vmap = nullptr;
 	if (!vmap)
 	{
 		vmap = (VTLBVirtual*)GetVmMemory().BumpAllocator().Alloc(VMAP_SIZE);
 		if (!vmap)
 			pxFailRel("Failed to allocate vtlb vmap");
 	}
 
 	if (!vtlbdata.vmap)
 	{
-		bool okay = HostSys::MmapCommitPtr(vmap, VMAP_SIZE, PageProtectionMode().Read().Write());
-		if (okay) {
-			vtlbdata.vmap = vmap;
-		} else {
-			throw Exception::OutOfMemory( "VTLB Virtual Address Translation LUT" )
-				.SetDiagMsg(fmt::format("({} megs)", VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap) / _1mb)
-			);
-		}
+		HostSys::MemProtect(vmap, VMAP_SIZE, PageProtectionMode().Read().Write());
+		vtlbdata.vmap = vmap;
 	}
 }

@@ -860,12 +820,8 @@ void vtlb_Alloc_Ppmap()
 	if (!ppmap)
 		ppmap = (u32*)GetVmMemory().BumpAllocator().Alloc(PPMAP_SIZE);
 
-	bool okay = HostSys::MmapCommitPtr(ppmap, PPMAP_SIZE, PageProtectionMode().Read().Write());
-	if (okay)
-		vtlbdata.ppmap = ppmap;
-	else
-		throw Exception::OutOfMemory("VTLB PS2 Virtual Address Translation LUT")
-			.SetDiagMsg(fmt::format("({} megs)", PPMAP_SIZE / _1mb));
+	HostSys::MemProtect(ppmap, PPMAP_SIZE, PageProtectionMode().Read().Write());
+	vtlbdata.ppmap = ppmap;
 
 	// By default a 1:1 virtual to physical mapping
 	for (u32 i = 0; i < VTLB_VMAP_ITEMS; i++)

@@ -876,12 +832,12 @@ void vtlb_Core_Free()
 {
 	if (vtlbdata.vmap)
 	{
-		HostSys::MmapResetPtr(vtlbdata.vmap, VMAP_SIZE);
+		HostSys::MemProtect(vtlbdata.vmap, VMAP_SIZE, PageProtectionMode());
 		vtlbdata.vmap = nullptr;
 	}
 	if (vtlbdata.ppmap)
 	{
-		HostSys::MmapResetPtr(vtlbdata.ppmap, PPMAP_SIZE);
+		HostSys::MemProtect(vtlbdata.ppmap, PPMAP_SIZE, PageProtectionMode());
 		vtlbdata.ppmap = nullptr;
 	}
 }
@@ -893,45 +849,28 @@ static std::string GetHostVmErrorMsg()
 // --------------------------------------------------------------------------------------
 //  VtlbMemoryReserve  (implementations)
 // --------------------------------------------------------------------------------------
-VtlbMemoryReserve::VtlbMemoryReserve( std::string name, size_t size )
-	: m_reserve( std::move(name), size )
+VtlbMemoryReserve::VtlbMemoryReserve(std::string name)
+	: VirtualMemoryReserve(std::move(name))
 {
-	m_reserve.SetPageAccessOnCommit( PageAccess_ReadWrite() );
 }
 
-void VtlbMemoryReserve::Reserve( VirtualMemoryManagerPtr allocator, sptr offset )
+void VtlbMemoryReserve::Assign(VirtualMemoryManagerPtr allocator, size_t offset, size_t size)
 {
-	if (!m_reserve.Reserve( std::move(allocator), offset ))
-	{
-		throw Exception::OutOfMemory( m_reserve.GetName() )
-			.SetDiagMsg("Vtlb memory could not be reserved.")
-			.SetUserMsg(GetHostVmErrorMsg());
-	}
-}
+	// Anything passed to the memory allocator must be page aligned.
+	size = Common::PageAlign(size);
 
-void VtlbMemoryReserve::Commit()
-{
-	if (IsCommitted()) return;
-	if (!m_reserve.Commit())
+	// Since the memory has already been allocated as part of the main memory map, this should never fail.
+	u8* base = allocator->Alloc(offset, size);
+	if (!base)
 	{
-		throw Exception::OutOfMemory( m_reserve.GetName() )
-			.SetDiagMsg("Vtlb memory could not be committed.")
-			.SetUserMsg(GetHostVmErrorMsg());
+		Console.WriteLn("(VtlbMemoryReserve) Failed to allocate %zu bytes for %s at offset %zu", size, m_name.c_str(), offset);
+		pxFailRel("VtlbMemoryReserve allocation failed.");
 	}
+
+	VirtualMemoryReserve::Assign(std::move(allocator), base, size);
 }
 
 void VtlbMemoryReserve::Reset()
 {
-	Commit();
-	memzero_sse_a(m_reserve.GetPtr(), m_reserve.GetCommittedBytes());
-}
-
-void VtlbMemoryReserve::Decommit()
-{
-	m_reserve.Reset();
-}
-
-bool VtlbMemoryReserve::IsCommitted() const
-{
-	return !!m_reserve.GetCommittedPageCount();
+	memzero_sse_a(GetPtr(), GetSize());
 }
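A minimal sketch of what Assign() does against the shared memory map: hand back a fixed-offset, page-aligned slice of one big reservation, failing hard rather than retrying. Arena and the constants below are illustrative stand-ins, not PCSX2's VirtualMemoryManager.

#include <cstdint>
#include <cstdio>
#include <memory>

class Arena
{
    std::unique_ptr<uint8_t[]> m_base;
    size_t m_size;

public:
    explicit Arena(size_t size) : m_base(new uint8_t[size]), m_size(size) {}

    // Returns the sub-region at [offset, offset+size), or null if out of range.
    uint8_t* Alloc(size_t offset, size_t size)
    {
        if (offset + size > m_size)
            return nullptr;
        return m_base.get() + offset;
    }
};

int main()
{
    Arena arena(64 * 1024);
    uint8_t* iop = arena.Alloc(0, 8 * 1024);       // fixed offsets play the role
    uint8_t* vu  = arena.Alloc(8 * 1024, 4 * 1024); // of HostMemoryMap::*Offset
    std::printf("iop=%p vu=%p\n", (void*)iop, (void*)vu);
}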
56	pcsx2/vtlb.h

@@ -26,15 +26,15 @@ static const uptr VTLB_AllocUpperBounds = _1gb * 2;
 typedef mem8_t vtlbMemR8FP(u32 addr);
 typedef mem16_t vtlbMemR16FP(u32 addr);
 typedef mem32_t vtlbMemR32FP(u32 addr);
-typedef RETURNS_R64 vtlbMemR64FP(u32 addr);
+typedef mem64_t vtlbMemR64FP(u32 addr);
 typedef RETURNS_R128 vtlbMemR128FP(u32 addr);
 
 // Specialized function pointers for each write type
 typedef void vtlbMemW8FP(u32 addr,mem8_t data);
 typedef void vtlbMemW16FP(u32 addr,mem16_t data);
 typedef void vtlbMemW32FP(u32 addr,mem32_t data);
-typedef void vtlbMemW64FP(u32 addr,const mem64_t* data);
-typedef void vtlbMemW128FP(u32 addr,const mem128_t* data);
+typedef void vtlbMemW64FP(u32 addr,mem64_t data);
+typedef void TAKES_R128 vtlbMemW128FP(u32 addr,r128 data);
 
 template <size_t Width, bool Write> struct vtlbMemFP;

@@ -55,9 +55,8 @@ extern void vtlb_Core_Alloc();
 extern void vtlb_Core_Free();
 extern void vtlb_Alloc_Ppmap();
 extern void vtlb_Init();
+extern void vtlb_Shutdown();
 extern void vtlb_Reset();
-extern void vtlb_Term();
 
 extern vtlbHandler vtlb_NewHandler();

@@ -88,13 +87,11 @@ extern void vtlb_VMapUnmap(u32 vaddr,u32 sz);
 
 template< typename DataType >
 extern DataType vtlb_memRead(u32 mem);
-extern RETURNS_R64 vtlb_memRead64(u32 mem);
 extern RETURNS_R128 vtlb_memRead128(u32 mem);
 
 template< typename DataType >
 extern void vtlb_memWrite(u32 mem, DataType value);
-extern void vtlb_memWrite64(u32 mem, const mem64_t* value);
-extern void vtlb_memWrite128(u32 mem, const mem128_t* value);
+extern void TAKES_R128 vtlb_memWrite128(u32 mem, r128 value);
 
 // "Safe" variants of vtlb, designed for external tools.
 // These routines only access the various RAM, and will not call handlers

@@ -105,37 +102,30 @@ template <typename DataType>
 extern bool vtlb_ramWrite(u32 mem, const DataType& value);
 
 extern void vtlb_DynGenWrite(u32 sz);
-extern void vtlb_DynGenRead32(u32 bits, bool sign);
-extern int vtlb_DynGenRead64(u32 sz, int gpr);
+extern void vtlb_DynGenReadNonQuad(u32 bits, bool sign);
+extern int vtlb_DynGenReadQuad(u32 sz, int gpr);
 
 extern void vtlb_DynGenWrite_Const( u32 bits, u32 addr_const );
-extern int vtlb_DynGenRead64_Const( u32 bits, u32 addr_const, int gpr );
-extern void vtlb_DynGenRead32_Const( u32 bits, bool sign, u32 addr_const );
+extern int vtlb_DynGenReadQuad_Const( u32 bits, u32 addr_const, int gpr );
+extern void vtlb_DynGenReadNonQuad_Const( u32 bits, bool sign, u32 addr_const );
 
 // --------------------------------------------------------------------------------------
 //  VtlbMemoryReserve
 // --------------------------------------------------------------------------------------
-class VtlbMemoryReserve
+class VtlbMemoryReserve : public VirtualMemoryReserve
 {
-protected:
-	VirtualMemoryReserve m_reserve;
-
 public:
-	VtlbMemoryReserve( std::string name, size_t size );
+	VtlbMemoryReserve(std::string name);
 
-	void Reserve( VirtualMemoryManagerPtr allocator, sptr offset );
+	void Assign(VirtualMemoryManagerPtr allocator, size_t offset, size_t size);
 
-	virtual void Commit();
 	virtual void Reset();
-	virtual void Decommit();
-
-	bool IsCommitted() const;
 };
 
 // --------------------------------------------------------------------------------------
 //  eeMemoryReserve
 // --------------------------------------------------------------------------------------
-class eeMemoryReserve : public VtlbMemoryReserve
+class eeMemoryReserve : private VtlbMemoryReserve
 {
 	typedef VtlbMemoryReserve _parent;

@@ -143,32 +133,33 @@ public:
 	eeMemoryReserve();
 	~eeMemoryReserve();
 
-	void Reserve(VirtualMemoryManagerPtr allocator);
-	void Commit() override;
-	void Decommit() override;
+	void Assign(VirtualMemoryManagerPtr allocator);
+	void Release();
 
 	void Reset() override;
 };
 
 // --------------------------------------------------------------------------------------
 //  iopMemoryReserve
 // --------------------------------------------------------------------------------------
-class iopMemoryReserve : public VtlbMemoryReserve
+class iopMemoryReserve : private VtlbMemoryReserve
 {
 	typedef VtlbMemoryReserve _parent;
 
 public:
 	iopMemoryReserve();
 	~iopMemoryReserve();
 
+	void Assign(VirtualMemoryManagerPtr allocator);
+	void Release();
 
-	void Reserve(VirtualMemoryManagerPtr allocator);
-	void Commit() override;
-	void Decommit() override;
 	void Reset() override;
 };
 
 // --------------------------------------------------------------------------------------
 //  vuMemoryReserve
 // --------------------------------------------------------------------------------------
-class vuMemoryReserve : public VtlbMemoryReserve
+class vuMemoryReserve : private VtlbMemoryReserve
 {
 	typedef VtlbMemoryReserve _parent;

@@ -176,7 +167,8 @@ public:
 	vuMemoryReserve();
 	~vuMemoryReserve();
 
-	void Reserve(VirtualMemoryManagerPtr allocator);
+	void Assign(VirtualMemoryManagerPtr allocator);
+	void Release();
 
 	void Reset() override;
 };
@@ -688,7 +688,6 @@ void psxRecompileCodeConst3(R3000AFNPTR constcode, R3000AFNPTR_INFO constscode,
 	noconstcode(0);
 }
 
-static uptr m_ConfiguredCacheReserve = 32;
 static u8* m_recBlockAlloc = NULL;
 
 static const uint m_recBlockAllocSize =

@@ -696,22 +695,12 @@ static const uint m_recBlockAllocSize =
 
 static void recReserveCache()
 {
-	if (!recMem)
-		recMem = new RecompiledCodeReserve("R3000A Recompiler Cache", _8mb);
+	if (recMem)
+		return;
+
+	recMem = new RecompiledCodeReserve("R3000A Recompiler Cache");
 	recMem->SetProfilerName("IOPrec");
 
-	while (!recMem->IsOk())
-	{
-		if (recMem->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::IOPrecOffset, m_ConfiguredCacheReserve * _1mb) != NULL)
-			break;
-
-		// If it failed, then try again (if possible):
-		if (m_ConfiguredCacheReserve < 4)
-			break;
-		m_ConfiguredCacheReserve /= 2;
-	}
-
-	recMem->ThrowIfNotOk();
+	recMem->Assign(GetVmMemory().CodeMemory(), HostMemoryMap::IOPrecOffset, 32 * _1mb);
 }
 
 static void recReserve()

@@ -1561,23 +1550,10 @@ StartRecomp:
 	s_pCurBlockEx = NULL;
 }
 
-static void recSetCacheReserve(uint reserveInMegs)
-{
-	m_ConfiguredCacheReserve = reserveInMegs;
-}
-
-static uint recGetCacheReserve()
-{
-	return m_ConfiguredCacheReserve;
-}
-
 R3000Acpu psxRec = {
 	recReserve,
 	recResetIOP,
 	recExecuteBlock,
 	recClearIOP,
 	recShutdown,
-
-	recGetCacheReserve,
-	recSetCacheReserve
 };
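The shrink-and-retry reservation loop disappears because every recompiler cache now lives at a fixed offset inside one up-front code-memory arena, so allocation either succeeds or is a hard configuration error. A sketch of that initialisation shape, with CodeArena and the offset as stand-ins for GetVmMemory().CodeMemory() and HostMemoryMap::IOPrecOffset:

#include <cassert>
#include <cstdint>
#include <vector>

struct CodeArena
{
    std::vector<uint8_t> storage = std::vector<uint8_t>(64 * 1024 * 1024);
    uint8_t* Alloc(size_t offset, size_t size)
    {
        return (offset + size <= storage.size()) ? storage.data() + offset : nullptr;
    }
};

static CodeArena g_codeArena;
static uint8_t* g_recMem = nullptr;

void recReserveCache()
{
    if (g_recMem) // already assigned; subsequent calls are no-ops
        return;

    constexpr size_t kIopRecOffset = 16 * 1024 * 1024; // stand-in offset
    g_recMem = g_codeArena.Alloc(kIopRecOffset, 32 * 1024 * 1024);
    assert(g_recMem && "with a fixed layout this can only fail by misconfiguration");
}

int main()
{
    recReserveCache();
    recReserveCache(); // second call is a no-op
    return g_recMem ? 0 : 1;
}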
@@ -111,6 +111,7 @@ u32* _eeGetConstReg(int reg);
 
 // finds where the GPR is stored and moves lower 32 bits to EAX
 void _eeMoveGPRtoR(const x86Emitter::xRegister32& to, int fromgpr);
+void _eeMoveGPRtoR(const x86Emitter::xRegister64& to, int fromgpr);
 void _eeMoveGPRtoM(uptr to, int fromgpr);
 void _eeMoveGPRtoRm(x86IntRegType to, int fromgpr);
 void _signExtendToMem(void* mem);
@@ -47,6 +47,8 @@
 #include "common/MemsetFast.inl"
 #include "common/Perf.h"
 
+// Only for MOVQ workaround.
+#include "common/emitter/internal.h"
 
 using namespace x86Emitter;
 using namespace R5900;

@@ -90,8 +92,6 @@ static u8* recRAMCopy = NULL;
 static u8* recLutReserve_RAM = NULL;
 static const size_t recLutSize = (Ps2MemSize::MainRam + Ps2MemSize::Rom + Ps2MemSize::Rom1 + Ps2MemSize::Rom2) * wordsize / 4;
 
-static uptr m_ConfiguredCacheReserve = 64;
-
 alignas(16) static u32 recConstBuf[RECCONSTBUF_SIZE]; // 64-bit pseudo-immediates
 static BASEBLOCK* recRAM = NULL; // and the ptr to the blocks here
 static BASEBLOCK* recROM = NULL; // and here
@@ -191,6 +191,27 @@ void _eeMoveGPRtoR(const xRegister32& to, int fromgpr)
 	}
 }
 
+void _eeMoveGPRtoR(const xRegister64& to, int fromgpr)
+{
+	if (fromgpr == 0)
+		xXOR(to, to);
+	else if (GPR_IS_CONST1(fromgpr))
+		xMOV64(to, g_cpuConstRegs[fromgpr].UD[0]);
+	else
+	{
+		int mmreg;
+
+		if ((mmreg = _checkXMMreg(XMMTYPE_GPRREG, fromgpr, MODE_READ)) >= 0 && (xmmregs[mmreg].mode & MODE_WRITE))
+		{
+			xMOVD(to, xRegisterSSE(mmreg));
+		}
+		else
+		{
+			xMOV(to, ptr[&cpuRegs.GPR.r[fromgpr].UD[0]]);
+		}
+	}
+}
+
 void _eeMoveGPRtoM(uptr to, int fromgpr)
 {
 	if (GPR_IS_CONST1(fromgpr))
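In plain C++ terms, the new 64-bit move helper above picks one of three sources for a guest GPR: the architectural zero register, a constant propagated at compile time, or the in-memory register file (the XMM-resident case is omitted here). A stand-in model:

#include <cstdint>
#include <cstdio>

constexpr int kGprCount = 32;
uint64_t g_constRegs[kGprCount]; // constant-propagation values
bool     g_isConst[kGprCount];   // GPR_IS_CONST1 stand-in
uint64_t g_regFile[kGprCount];   // cpuRegs.GPR stand-in

uint64_t moveGprToHost(int fromgpr)
{
    if (fromgpr == 0)
        return 0;                    // MIPS $zero: emit XOR reg,reg
    if (g_isConst[fromgpr])
        return g_constRegs[fromgpr]; // emit MOV reg, imm64
    return g_regFile[fromgpr];       // emit MOV reg, [register file]
}

int main()
{
    g_isConst[3] = true; g_constRegs[3] = 42;
    g_regFile[5] = 7;
    std::printf("%llu %llu %llu\n",
        (unsigned long long)moveGprToHost(0),
        (unsigned long long)moveGprToHost(3),
        (unsigned long long)moveGprToHost(5));
}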
@@ -532,22 +553,12 @@ static void recThrowHardwareDeficiency(const char* extFail)
 
 static void recReserveCache()
 {
-	if (!recMem)
-		recMem = new RecompiledCodeReserve("R5900-32 Recompiler Cache", _16mb);
+	if (recMem)
+		return;
+
+	recMem = new RecompiledCodeReserve("R5900 Recompiler Cache");
 	recMem->SetProfilerName("EErec");
 
-	while (!recMem->IsOk())
-	{
-		if (recMem->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::EErecOffset, m_ConfiguredCacheReserve * _1mb) != NULL)
-			break;
-
-		// If it failed, then try again (if possible):
-		if (m_ConfiguredCacheReserve < 16)
-			break;
-		m_ConfiguredCacheReserve /= 2;
-	}
-
-	recMem->ThrowIfNotOk();
+	recMem->Assign(GetVmMemory().CodeMemory(), HostMemoryMap::EErecOffset, 64 * _1mb);
 }
 
 static void recReserve()
@@ -2398,16 +2409,6 @@ static void recThrowException(const BaseException& ex)
 	recExitExecution();
 }
 
-static void recSetCacheReserve(uint reserveInMegs)
-{
-	m_ConfiguredCacheReserve = reserveInMegs;
-}
-
-static uint recGetCacheReserve()
-{
-	return m_ConfiguredCacheReserve;
-}
-
 R5900cpu recCpu =
 {
 	recReserve,

@@ -2420,8 +2421,5 @@ R5900cpu recCpu =
 	recSafeExitExecution,
 	recThrowException,
 	recThrowException,
-	recClear,
-
-	recGetCacheReserve,
-	recSetCacheReserve,
+	recClear
 };
@@ -100,14 +100,14 @@ alignas(16) u32 dummyValue[4];
 
 //////////////////////////////////////////////////////////////////////////////////////////
 //
-void recLoad64(u32 bits, bool sign)
+static void recLoadQuad(u32 bits, bool sign)
 {
-	pxAssume(bits == 64 || bits == 128);
+	pxAssume(bits == 128);
 
 	// Load arg2 with the destination.
 	// 64/128 bit modes load the result directly into the cpuRegs.GPR struct.
 
-	int gprreg = ((bits == 128) && _Rt_) ? _Rt_ : -1;
+	int gprreg = _Rt_ ? _Rt_ : -1;
 	int reg;
 
 	if (GPR_IS_CONST1(_Rs_))

@@ -119,12 +119,12 @@ void recLoad64(u32 bits, bool sign)
 		_eeOnLoadWrite(_Rt_);
 		_deleteEEreg(_Rt_, 0);
 
-		reg = vtlb_DynGenRead64_Const(bits, srcadr, gprreg);
+		reg = vtlb_DynGenReadQuad_Const(bits, srcadr, gprreg);
 	}
 	else
 	{
 		// Load ECX with the source memory address that we're reading from.
-		_eeMoveGPRtoR(arg1regd, _Rs_);
+		_eeMoveGPRtoR(arg1reg, _Rs_);
 		if (_Imm_ != 0)
 			xADD(arg1regd, _Imm_);
 		if (bits == 128) // force 16 byte alignment on 128 bit reads

@@ -134,7 +134,7 @@ void recLoad64(u32 bits, bool sign)
 		_deleteEEreg(_Rt_, 0);
 
 		iFlushCall(FLUSH_FULLVTLB);
-		reg = vtlb_DynGenRead64(bits, gprreg);
+		reg = vtlb_DynGenReadQuad(bits, gprreg);
 	}
 
 	if (gprreg == -1)
@@ -148,9 +148,9 @@ void recLoad64(u32 bits, bool sign)
 
 //////////////////////////////////////////////////////////////////////////////////////////
 //
-void recLoad32(u32 bits, bool sign)
+static void recLoadNonQuad(u32 bits, bool sign)
 {
-	pxAssume(bits <= 32);
+	pxAssume(bits <= 64);
 
 	// 8/16/32 bit modes return the loaded value in EAX.
 

@@ -161,7 +161,7 @@ void recLoad32(u32 bits, bool sign)
 		_eeOnLoadWrite(_Rt_);
 		_deleteEEreg(_Rt_, 0);
 
-		vtlb_DynGenRead32_Const(bits, sign, srcadr);
+		vtlb_DynGenReadNonQuad_Const(bits, sign, srcadr);
 	}
 	else
 	{

@@ -174,23 +174,17 @@ void recLoad32(u32 bits, bool sign)
 		_deleteEEreg(_Rt_, 0);
 
 		iFlushCall(FLUSH_FULLVTLB);
-		vtlb_DynGenRead32(bits, sign);
+		vtlb_DynGenReadNonQuad(bits, sign);
 	}
 
 	if (_Rt_)
 	{
-		// EAX holds the loaded value, so sign extend as needed:
-		if (sign)
-			xCDQE();
-
 		xMOV(ptr64[&cpuRegs.GPR.r[_Rt_].UD[0]], rax);
 	}
 }
 //////////////////////////////////////////////////////////////////////////////////////////
 //
 
-void recStore(u32 bits)
+static void recStore(u32 bits)
 {
 	// Performance note: Const prop for the store address is good, always.
 	// Constprop for the value being stored is not really worthwhile (better to use register

@@ -199,14 +193,16 @@ void recStore(u32 bits)
 	// Load EDX first with the value being written, or the address of the value
 	// being written (64/128 bit modes).
 
-	if (bits < 64)
+	if (bits < 128)
 	{
-		_eeMoveGPRtoR(arg2regd, _Rt_);
+		_eeMoveGPRtoR(arg2reg, _Rt_);
 	}
-	else if (bits == 128 || bits == 64)
+	else
 	{
 		_flushEEreg(_Rt_); // flush register to mem
-		xLEA(arg2reg, ptr[&cpuRegs.GPR.r[_Rt_].UL[0]]);
+		int rpreg = _allocTempXMMreg(XMMT_INT, 1);
+		xMOVAPS(xRegisterSSE(rpreg), ptr128[&cpuRegs.GPR.r[_Rt_].UL[0]]);
+		_freeXMMreg(rpreg);
 	}
 
 	// Load ECX with the destination address, or issue a direct optimized write
@@ -237,14 +233,14 @@ void recStore(u32 bits)
 
 //////////////////////////////////////////////////////////////////////////////////////////
 //
-void recLB()  { recLoad32(  8, true);  EE::Profiler.EmitOp(eeOpcode::LB);  }
-void recLBU() { recLoad32(  8, false); EE::Profiler.EmitOp(eeOpcode::LBU); }
-void recLH()  { recLoad32( 16, true);  EE::Profiler.EmitOp(eeOpcode::LH);  }
-void recLHU() { recLoad32( 16, false); EE::Profiler.EmitOp(eeOpcode::LHU); }
-void recLW()  { recLoad32( 32, true);  EE::Profiler.EmitOp(eeOpcode::LW);  }
-void recLWU() { recLoad32( 32, false); EE::Profiler.EmitOp(eeOpcode::LWU); }
-void recLD()  { recLoad64( 64, false); EE::Profiler.EmitOp(eeOpcode::LD);  }
-void recLQ()  { recLoad64(128, false); EE::Profiler.EmitOp(eeOpcode::LQ);  }
+void recLB()  { recLoadNonQuad(  8, true);  EE::Profiler.EmitOp(eeOpcode::LB);  }
+void recLBU() { recLoadNonQuad(  8, false); EE::Profiler.EmitOp(eeOpcode::LBU); }
+void recLH()  { recLoadNonQuad( 16, true);  EE::Profiler.EmitOp(eeOpcode::LH);  }
+void recLHU() { recLoadNonQuad( 16, false); EE::Profiler.EmitOp(eeOpcode::LHU); }
+void recLW()  { recLoadNonQuad( 32, true);  EE::Profiler.EmitOp(eeOpcode::LW);  }
+void recLWU() { recLoadNonQuad( 32, false); EE::Profiler.EmitOp(eeOpcode::LWU); }
+void recLD()  { recLoadNonQuad( 64, false); EE::Profiler.EmitOp(eeOpcode::LD);  }
+void recLQ()  { recLoadQuad(128, false);    EE::Profiler.EmitOp(eeOpcode::LQ);  }
 
 void recSB() { recStore( 8); EE::Profiler.EmitOp(eeOpcode::SB); }
 void recSH() { recStore(16); EE::Profiler.EmitOp(eeOpcode::SH); }
@@ -270,7 +266,7 @@ void recLWL()
 	xSHL(calleeSavedReg1d, 3);
 
 	xAND(arg1regd, ~3);
-	vtlb_DynGenRead32(32, false);
+	vtlb_DynGenReadNonQuad(32, false);
 
 	if (!_Rt_)
 		return;

@@ -314,7 +310,7 @@ void recLWR()
 	xMOV(calleeSavedReg1d, arg1regd);
 
 	xAND(arg1regd, ~3);
-	vtlb_DynGenRead32(32, false);
+	vtlb_DynGenReadNonQuad(32, false);
 
 	if (!_Rt_)
 		return;

@@ -367,7 +363,7 @@ void recSWL()
 	xForwardJE8 skip;
 	xSHL(calleeSavedReg1d, 3);
 
-	vtlb_DynGenRead32(32, false);
+	vtlb_DynGenReadNonQuad(32, false);
 
 	// mask read -> arg2
 	xMOV(ecx, calleeSavedReg1d);

@@ -423,7 +419,7 @@ void recSWR()
 	xForwardJE8 skip;
 	xSHL(calleeSavedReg1d, 3);
 
-	vtlb_DynGenRead32(32, false);
+	vtlb_DynGenReadNonQuad(32, false);
 
 	// mask read -> edx
 	xMOV(ecx, 24);
@@ -465,40 +461,36 @@ void recSWR()
 ////////////////////////////////////////////////////
 
 /// Masks rt with (0xffffffffffffffff maskshift maskamt), merges with (value shift amt), leaves result in value
-static void ldlrhelper_const(int maskamt, const xImplSimd_Shift& maskshift, int amt, const xImplSimd_Shift& shift, const xRegisterSSE& value, const xRegisterSSE& rt)
+static void ldlrhelper_const(int maskamt, const xImpl_Group2& maskshift, int amt, const xImpl_Group2& shift, const xRegister64& value, const xRegister64& rt)
 {
-	int t0reg = _allocTempXMMreg(XMMT_INT, -1);
-	xRegisterSSE t0(t0reg);
+	pxAssert(rt.GetId() != ecx.GetId() && value.GetId() != ecx.GetId());
+	_freeX86reg(ecx);
+	xMOV(rcx, -1);
+	maskshift(rcx, maskamt);
+	xAND(rt, rcx);
 
-	xPCMP.EQD(t0, t0);
-	maskshift.Q(t0, maskamt);
-	xPAND(t0, rt);
-
-	shift.Q(value, amt);
-	xPOR(value, t0);
-
-	_freeXMMreg(t0reg);
+	shift(value, amt);
+	xOR(value, rt);
 }
 
 /// Masks rt with (0xffffffffffffffff maskshift maskamt), merges with (value shift amt), leaves result in value
-static void ldlrhelper(const xRegister32& maskamt, const xImplSimd_Shift& maskshift, const xRegister32& amt, const xImplSimd_Shift& shift, const xRegisterSSE& value, const xRegisterSSE& rt)
+static void ldlrhelper(const xRegister32& maskamt, const xImpl_Group2& maskshift, const xRegister32& amt, const xImpl_Group2& shift, const xRegister64& value, const xRegister64& rt)
 {
-	int t0reg = _allocTempXMMreg(XMMT_INT, -1);
-	int t1reg = _allocTempXMMreg(XMMT_INT, -1);
-	xRegisterSSE t0(t0reg);
-	xRegisterSSE t1(t1reg);
+	pxAssert(rt.GetId() != ebx.GetId() && value.GetId() != ebx.GetId() && rt.GetId() != ecx.GetId() && value.GetId() != ecx.GetId());
+	pxAssert(maskamt.GetId() != ebx.GetId() && amt.GetId() != ebx.GetId() && rt.GetId() != ecx.GetId() && value.GetId() != ecx.GetId());
 
-	xMOVDZX(t1, maskamt);
-	xPCMP.EQD(t0, t0);
-	maskshift.Q(t0, t1);
-	xPAND(t0, rt);
+	// TODO: Use actual register allocator here. Don't assume ebx is free.
+	_freeX86reg(ebx);
+	_freeX86reg(ecx);
 
-	xMOVDZX(t1, amt);
-	shift.Q(value, t1);
-	xPOR(value, t0);
+	xMOV(rbx, -1);
+	xMOV(ecx, maskamt);
+	maskshift(rbx, cl);
+	xAND(rt, rbx);
 
-	_freeXMMreg(t1reg);
-	_freeXMMreg(t0reg);
+	xMOV(ecx, amt);
+	shift(value, cl);
+	xOR(value, rt);
 }
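What the rewritten helpers compute, in plain integer C++: LDL merges the most-significant bytes of an unaligned doubleword from memory into the top of rt while keeping rt's untouched low bytes. A sketch of the const-address path (the values in main are arbitrary test data):

#include <cstdint>
#include <cstdio>

uint64_t ldl_merge(uint64_t mem, uint64_t rt, uint32_t addr)
{
    const unsigned shift = ((addr & 7) + 1) * 8; // bits supplied by memory
    if (shift == 64)
        return mem; // aligned case: the whole doubleword comes from memory
    // (~0 >> shift) keeps rt's low bytes (the xMOV/xSHR/xAND sequence);
    // mem << (64 - shift) positions the loaded bytes (the xSHL/xOR sequence).
    return (mem << (64 - shift)) | (rt & (~0ull >> shift));
}

int main()
{
    // addr & 7 == 3: four bytes come from memory, four stay in rt
    std::printf("0x%016llx\n",
        (unsigned long long)ldl_merge(0x1122334455667788ull, 0xaabbccddeeff0011ull, 3));
}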
 void recLDL()

@@ -506,9 +498,7 @@ void recLDL()
 	if (!_Rt_)
 		return;
 
-#ifdef LOADSTORE_RECOMPILE
-	int t2reg;
-
+#ifdef REC_LOADS
 	if (GPR_IS_CONST1(_Rt_))
 	{
 		_flushConstReg(_Rt_);

@@ -525,7 +515,7 @@ void recLDL()
 
 		srcadr &= ~0x07;
 
-		t2reg = vtlb_DynGenRead64_Const(64, srcadr, -1);
+		vtlb_DynGenReadNonQuad_Const(64, false, srcadr);
 	}
 	else
 	{

@@ -539,10 +529,11 @@ void recLDL()
 
 		iFlushCall(FLUSH_FULLVTLB);
 
-		t2reg = vtlb_DynGenRead64(64, -1);
+		vtlb_DynGenReadNonQuad(64, false);
 	}
 
-	int rtreg = _allocGPRtoXMMreg(-1, _Rt_, MODE_READ | MODE_WRITE);
+	_freeX86reg(calleeSavedReg2d);
+	_eeMoveGPRtoR(calleeSavedReg2, _Rt_);
 
 	if (GPR_IS_CONST1(_Rs_))
 	{

@@ -550,11 +541,13 @@ void recLDL()
 		shift = ((shift & 0x7) + 1) * 8;
 		if (shift != 64)
 		{
-			ldlrhelper_const(shift, xPSRL, 64 - shift, xPSLL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
+			ldlrhelper_const(shift, xSHR, 64 - shift, xSHL, rax, calleeSavedReg2);
 		}
 	}
 	else
 	{
+		_freeX86reg(edx);
+
 		xAND(calleeSavedReg1d, 0x7);
 		xCMP(calleeSavedReg1d, 7);
 		xForwardJE8 skip;

@@ -564,14 +557,11 @@ void recLDL()
 		xSHL(calleeSavedReg1d, 3);
 		xSUB(edx, calleeSavedReg1d);
 
-		ldlrhelper(calleeSavedReg1d, xPSRL, edx, xPSLL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
+		ldlrhelper(calleeSavedReg1d, xSHR, edx, xSHL, rax, calleeSavedReg2);
 		skip.SetTarget();
 	}
-	xMOVSD(xRegisterSSE(rtreg), xRegisterSSE(t2reg));
 
-	_freeXMMreg(t2reg);
-	_clearNeededXMMregs();
-
+	xMOV(ptr[&cpuRegs.GPR.r[_Rt_].UD[0]], rax);
 #else
 	iFlushCall(FLUSH_INTERPRETER);
 	_deleteEEreg(_Rs_, 1);
@@ -588,9 +578,7 @@ void recLDR()
 	if (!_Rt_)
 		return;
 
-#ifdef LOADSTORE_RECOMPILE
-	int t2reg;
-
+#ifdef REC_LOADS
 	if (GPR_IS_CONST1(_Rt_))
 	{
 		_flushConstReg(_Rt_);

@@ -607,7 +595,7 @@ void recLDR()
 
 		srcadr &= ~0x07;
 
-		t2reg = vtlb_DynGenRead64_Const(64, srcadr, -1);
+		vtlb_DynGenReadNonQuad_Const(64, false, srcadr);
 	}
 	else
 	{

@@ -621,10 +609,11 @@ void recLDR()
 
 		iFlushCall(FLUSH_FULLVTLB);
 
-		t2reg = vtlb_DynGenRead64(64, -1);
+		vtlb_DynGenReadNonQuad(64, false);
 	}
 
-	int rtreg = _allocGPRtoXMMreg(-1, _Rt_, MODE_READ | MODE_WRITE);
+	_freeX86reg(calleeSavedReg2d);
+	_eeMoveGPRtoR(calleeSavedReg2, _Rt_);
 
 	if (GPR_IS_CONST1(_Rs_))
 	{

@@ -632,11 +621,13 @@ void recLDR()
 		shift = (shift & 0x7) * 8;
 		if (shift != 0)
 		{
-			ldlrhelper_const(64 - shift, xPSLL, shift, xPSRL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
+			ldlrhelper_const(64 - shift, xSHL, shift, xSHR, rax, calleeSavedReg2);
 		}
 	}
 	else
 	{
+		_freeX86reg(edx);
+
 		xAND(calleeSavedReg1d, 0x7);
 		xForwardJE8 skip;
 		// Calculate the shift from top bit to lowest

@@ -644,15 +635,11 @@ void recLDR()
 		xSHL(calleeSavedReg1d, 3);
 		xSUB(edx, calleeSavedReg1d);
 
-		ldlrhelper(edx, xPSLL, calleeSavedReg1d, xPSRL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
+		ldlrhelper(edx, xSHL, calleeSavedReg1d, xSHR, rax, calleeSavedReg2);
 		skip.SetTarget();
 	}
 
-	xMOVSD(xRegisterSSE(rtreg), xRegisterSSE(t2reg));
-
-	_freeXMMreg(t2reg);
-	_clearNeededXMMregs();
-
+	xMOV(ptr[&cpuRegs.GPR.r[_Rt_].UD[0]], rax);
 #else
 	iFlushCall(FLUSH_INTERPRETER);
 	_deleteEEreg(_Rs_, 1);
@@ -666,53 +653,43 @@ void recLDR()
 ////////////////////////////////////////////////////
 
 /// Masks value with (0xffffffffffffffff maskshift maskamt), merges with (rt shift amt), saves to dummyValue
-static void sdlrhelper_const(int maskamt, const xImplSimd_Shift& maskshift, int amt, const xImplSimd_Shift& shift, const xRegisterSSE& value, const xRegisterSSE& rt)
+static void sdlrhelper_const(int maskamt, const xImpl_Group2& maskshift, int amt, const xImpl_Group2& shift, const xRegister64& value, const xRegister64& rt)
 {
-	int t0reg = _allocTempXMMreg(XMMT_INT, -1);
-	xRegisterSSE t0(t0reg);
+	pxAssert(rt.GetId() != ecx.GetId() && value.GetId() != ecx.GetId());
+	_freeX86reg(ecx);
+	xMOV(rcx, -1);
+	maskshift(rcx, maskamt);
+	xAND(rcx, value);
 
-	xPCMP.EQD(t0, t0);
-	maskshift.Q(t0, maskamt);
-	xPAND(t0, value);
-
-	shift.Q(rt, amt);
-	xPOR(rt, t0);
-
-	xLEA(arg2reg, ptr[&dummyValue[0]]);
-	xMOVQ(ptr64[arg2reg], rt);
-
-	_freeXMMreg(t0reg);
+	shift(rt, amt);
+	xOR(rt, rcx);
 }
 
 /// Masks value with (0xffffffffffffffff maskshift maskamt), merges with (rt shift amt), saves to dummyValue
-static void sdlrhelper(const xRegister32& maskamt, const xImplSimd_Shift& maskshift, const xRegister32& amt, const xImplSimd_Shift& shift, const xRegisterSSE& value, const xRegisterSSE& rt)
+static void sdlrhelper(const xRegister32& maskamt, const xImpl_Group2& maskshift, const xRegister32& amt, const xImpl_Group2& shift, const xRegister64& value, const xRegister64& rt)
 {
-	int t0reg = _allocTempXMMreg(XMMT_INT, -1);
-	int t1reg = _allocTempXMMreg(XMMT_INT, -1);
-	xRegisterSSE t0(t0reg);
-	xRegisterSSE t1(t1reg);
+	pxAssert(rt.GetId() != ebx.GetId() && value.GetId() != ebx.GetId() && rt.GetId() != ecx.GetId() && value.GetId() != ecx.GetId());
+	pxAssert(maskamt.GetId() != ebx.GetId() && amt.GetId() != ebx.GetId() && rt.GetId() != ecx.GetId() && value.GetId() != ecx.GetId());
+
+	// TODO: Use actual register allocator here. Don't assume ebx is free.
+	_freeX86reg(ebx);
+	_freeX86reg(ecx);
 
 	// Generate mask 128-(shiftx8)
-	xMOVDZX(t1, maskamt);
-	xPCMP.EQD(t0, t0);
-	maskshift.Q(t0, t1);
-	xPAND(t0, value);
+	xMOV(rbx, -1);
+	xMOV(ecx, maskamt);
+	maskshift(rbx, cl);
+	xAND(rbx, value);
 
 	// Shift over reg value
-	xMOVDZX(t1, amt);
-	shift.Q(rt, t1);
-	xPOR(rt, t0);
-
-	xLEA(arg2reg, ptr[&dummyValue[0]]);
-	xMOVQ(ptr64[arg2reg], rt);
-
-	_freeXMMreg(t1reg);
-	_freeXMMreg(t0reg);
+	xMOV(ecx, amt);
+	shift(rt, cl);
+	xOR(rt, rbx);
 }
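The store-side counterpart in plain integer C++: SDL keeps the bytes of the aligned memory doubleword that the store must not touch and merges in the most-significant bytes of rt. A sketch of the const-address path (arbitrary test data in main):

#include <cstdint>
#include <cstdio>

uint64_t sdl_merge(uint64_t mem, uint64_t rt, uint32_t addr)
{
    const unsigned shift = ((addr & 7) + 1) * 8; // bits rt contributes
    if (shift == 64)
        return rt; // aligned case: a plain 64-bit store of rt
    // mem & (~0 << shift) preserves memory's high bytes (xMOV/xSHL/xAND);
    // rt >> (64 - shift) drops rt's high bytes into place (xSHR/xOR).
    return (rt >> (64 - shift)) | (mem & (~0ull << shift));
}

int main()
{
    std::printf("0x%016llx\n",
        (unsigned long long)sdl_merge(0x1122334455667788ull, 0xaabbccddeeff0011ull, 3));
}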
 void recSDL()
 {
-#ifdef LOADSTORE_RECOMPILE
+#ifdef REC_STORES
+	_flushEEreg(_Rt_); // flush register to mem
 	if (GPR_IS_CONST1(_Rs_))
 	{

@@ -721,53 +698,53 @@ void recSDL()
 		u32 shift = ((adr & 0x7) + 1) * 8;
 		if (shift == 64)
 		{
-			xLEA(arg2reg, ptr[&cpuRegs.GPR.r[_Rt_].UL[0]]);
+			xMOV(arg2reg, ptr[&cpuRegs.GPR.r[_Rt_].UD[0]]);
 		}
 		else
 		{
-			int t2reg = vtlb_DynGenRead64_Const(64, aligned, -1);
-			int rtreg = _allocGPRtoXMMreg(-1, _Rt_, MODE_READ);
-			sdlrhelper_const(shift, xPSLL, 64 - shift, xPSRL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
-			_deleteGPRtoXMMreg(_Rt_, 3);
-			_freeXMMreg(t2reg);
+			vtlb_DynGenReadNonQuad_Const(64, false, aligned);
+			_eeMoveGPRtoR(arg2reg, _Rt_);
+			sdlrhelper_const(shift, xSHL, 64 - shift, xSHR, rax, arg2reg);
 		}
 		vtlb_DynGenWrite_Const(64, aligned);
 	}
 	else
 	{
 		// Load ECX with the source memory address that we're reading from.
 		_freeX86reg(arg1regd);
 		_eeMoveGPRtoR(arg1regd, _Rs_);
 		if (_Imm_ != 0)
 			xADD(arg1regd, _Imm_);
 
 		_freeX86reg(edx);
 		_freeX86reg(calleeSavedReg1d);
 		_freeX86reg(calleeSavedReg2d);
+		_freeX86reg(arg2regd);
+		_eeMoveGPRtoR(arg2reg, _Rt_);
 		iFlushCall(FLUSH_FULLVTLB);
 
 		xMOV(calleeSavedReg1d, arg1regd);
 		xMOV(calleeSavedReg2, arg2reg);
 		xAND(arg1regd, ~0x07);
 		xAND(calleeSavedReg1d, 0x7);
 		xCMP(calleeSavedReg1d, 7);
 		xForwardJE8 skip;
 		xADD(calleeSavedReg1d, 1);
-		int t2reg = vtlb_DynGenRead64(64, -1);
-		int rtreg = _allocGPRtoXMMreg(-1, _Rt_, MODE_READ);
+		vtlb_DynGenReadNonQuad(64, false);
 
 		//Calculate the shift from top bit to lowest
 		xMOV(edx, 64);
 		xSHL(calleeSavedReg1d, 3);
 		xSUB(edx, calleeSavedReg1d);
 
-		sdlrhelper(calleeSavedReg1d, xPSLL, edx, xPSRL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
-
-		_deleteGPRtoXMMreg(_Rt_, 3);
-		_freeXMMreg(t2reg);
+		sdlrhelper(calleeSavedReg1d, xSHL, edx, xSHR, rax, calleeSavedReg2);
 
+		_eeMoveGPRtoR(arg1regd, _Rs_);
+		if (_Imm_ != 0)
+			xADD(arg1regd, _Imm_);
+		xAND(arg1regd, ~0x7);
+		xForwardJump8 end;
+		xMOV(arg2reg, calleeSavedReg2);
+		skip.SetTarget();
+		xLEA(arg2reg, ptr[&cpuRegs.GPR.r[_Rt_].UL[0]]);
+		end.SetTarget();
 
 		iFlushCall(FLUSH_FULLVTLB);
@@ -785,7 +762,7 @@ void recSDL()
 ////////////////////////////////////////////////////
 void recSDR()
 {
-#ifdef LOADSTORE_RECOMPILE
+#ifdef REC_STORES
+	_flushEEreg(_Rt_); // flush register to mem
 	if (GPR_IS_CONST1(_Rs_))
 	{

@@ -794,15 +771,13 @@ void recSDR()
 		u32 shift = (adr & 0x7) * 8;
 		if (shift == 0)
 		{
-			xLEA(arg2reg, ptr[&cpuRegs.GPR.r[_Rt_].UL[0]]);
+			xMOV(arg2reg, ptr[&cpuRegs.GPR.r[_Rt_].UL[0]]);
 		}
 		else
 		{
-			int t2reg = vtlb_DynGenRead64_Const(64, aligned, -1);
-			int rtreg = _allocGPRtoXMMreg(-1, _Rt_, MODE_READ);
-			sdlrhelper_const(64 - shift, xPSRL, shift, xPSLL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
-			_deleteGPRtoXMMreg(_Rt_, 3);
-			_freeXMMreg(t2reg);
+			vtlb_DynGenReadNonQuad_Const(64, false, aligned);
+			_eeMoveGPRtoR(arg2reg, _Rt_);
+			sdlrhelper_const(64 - shift, xSHR, shift, xSHL, rax, arg2reg);
 		}
 
 		vtlb_DynGenWrite_Const(64, aligned);

@@ -814,31 +789,32 @@ void recSDR()
 		if (_Imm_ != 0)
 			xADD(arg1regd, _Imm_);
 
 		_freeX86reg(edx);
 		_freeX86reg(calleeSavedReg1d);
 		_freeX86reg(calleeSavedReg2d);
+		_freeX86reg(arg2regd);
+		_eeMoveGPRtoR(arg2reg, _Rt_);
 		iFlushCall(FLUSH_FULLVTLB);
 
 		xMOV(calleeSavedReg1d, arg1regd);
 		xMOV(calleeSavedReg2, arg2reg);
 		xAND(arg1regd, ~0x07);
 		xAND(calleeSavedReg1d, 0x7);
 		xForwardJE8 skip;
-		int t2reg = vtlb_DynGenRead64(64, -1);
-		int rtreg = _allocGPRtoXMMreg(-1, _Rt_, MODE_READ);
+		vtlb_DynGenReadNonQuad(64, false);
 
 		xMOV(edx, 64);
 		xSHL(calleeSavedReg1d, 3);
 		xSUB(edx, calleeSavedReg1d);
 
-		sdlrhelper(edx, xPSRL, calleeSavedReg1d, xPSLL, xRegisterSSE(t2reg), xRegisterSSE(rtreg));
-
-		_deleteGPRtoXMMreg(_Rt_, 3);
-		_freeXMMreg(t2reg);
+		sdlrhelper(edx, xSHR, calleeSavedReg1d, xSHL, rax, calleeSavedReg2);
 
+		_eeMoveGPRtoR(arg1regd, _Rs_);
+		if (_Imm_ != 0)
+			xADD(arg1regd, _Imm_);
+		xAND(arg1regd, ~0x7);
+		xForwardJump8 end;
+		xMOV(arg2reg, calleeSavedReg2);
+		skip.SetTarget();
+		xLEA(arg2reg, ptr[&cpuRegs.GPR.r[_Rt_].UL[0]]);
+		end.SetTarget();
 
 		iFlushCall(FLUSH_FULLVTLB);
@@ -871,7 +847,7 @@ void recLWC1()
 	if (GPR_IS_CONST1(_Rs_))
 	{
 		int addr = g_cpuConstRegs[_Rs_].UL[0] + _Imm_;
-		vtlb_DynGenRead32_Const(32, false, addr);
+		vtlb_DynGenReadNonQuad_Const(32, false, addr);
 	}
 	else
 	{

@@ -881,7 +857,7 @@ void recLWC1()
 
 		iFlushCall(FLUSH_FULLVTLB);
 
-		vtlb_DynGenRead32(32, false);
+		vtlb_DynGenReadNonQuad(32, false);
 	}
 
 	xMOV(ptr32[&fpuRegs.fpr[_Rt_].UL], eax);
@@ -961,7 +937,7 @@ void recLQC2()
 	{
 		int addr = g_cpuConstRegs[_Rs_].UL[0] + _Imm_;
 
-		gpr = vtlb_DynGenRead64_Const(128, addr, -1);
+		gpr = vtlb_DynGenReadQuad_Const(128, addr, -1);
 	}
 	else
 	{

@@ -971,7 +947,7 @@ void recLQC2()
 
 		iFlushCall(FLUSH_FULLVTLB);
 
-		gpr = vtlb_DynGenRead64(128, -1);
+		gpr = vtlb_DynGenReadQuad(128, -1);
 	}
 
 	if (_Rt_)
@@ -1005,7 +981,9 @@ void recSQC2()
 	skip.SetTarget();
 	skipvuidle.SetTarget();
 
-	xLEA(arg2reg, ptr[&VU0.VF[_Ft_].UD[0]]);
+	int rpreg = _allocTempXMMreg(XMMT_INT, 1);
+	xMOVAPS(xRegisterSSE(rpreg), ptr128[&VU0.VF[_Ft_].UD[0]]);
+	_freeXMMreg(rpreg);
 
 	if (GPR_IS_CONST1(_Rs_))
 	{
@@ -26,76 +26,6 @@
 using namespace vtlb_private;
 using namespace x86Emitter;
 
-//////////////////////////////////////////////////////////////////////////////////////////
-// iAllocRegSSE -- allocates an xmm register.  If no xmm register is available, xmm0 is
-// saved into g_globalXMMData and returned as a free register.
-//
-class iAllocRegSSE
-{
-protected:
-	xRegisterSSE m_reg;
-	bool m_free;
-
-public:
-	iAllocRegSSE()
-		: m_reg(xmm0)
-		, m_free(!!_hasFreeXMMreg())
-	{
-		if (m_free)
-			m_reg = xRegisterSSE(_allocTempXMMreg(XMMT_INT, -1));
-		else
-			xStoreReg(m_reg);
-	}
-
-	~iAllocRegSSE()
-	{
-		if (m_free)
-			_freeXMMreg(m_reg.Id);
-		else
-			xRestoreReg(m_reg);
-	}
-
-	operator xRegisterSSE() const { return m_reg; }
-};
-
-// Moves 128 bits from point B to point A, using SSE's MOVAPS (or MOVDQA).
-// This instruction always uses an SSE register, even if all registers are allocated!  It
-// saves an SSE register to memory first, performs the copy, and restores the register.
-//
-static void iMOV128_SSE(const xIndirectVoid& destRm, const xIndirectVoid& srcRm)
-{
-	iAllocRegSSE reg;
-	xMOVDQA(reg, srcRm);
-	xMOVDQA(destRm, reg);
-}
-
-// Moves 64 bits of data from point B to point A, using either SSE, or x86 registers
-//
-static void iMOV64_Smart(const xIndirectVoid& destRm, const xIndirectVoid& srcRm)
-{
-	if (wordsize == 8)
-	{
-		xMOV(rax, srcRm);
-		xMOV(destRm, rax);
-		return;
-	}
-
-	if (_hasFreeXMMreg())
-	{
-		// Move things using MOVLPS:
-		xRegisterSSE reg(_allocTempXMMreg(XMMT_INT, -1));
-		xMOVL.PS(reg, srcRm);
-		xMOVL.PS(destRm, reg);
-		_freeXMMreg(reg.Id);
-		return;
-	}
-
-	xMOV(eax, srcRm);
-	xMOV(destRm, eax);
-	xMOV(eax, srcRm + 4);
-	xMOV(destRm + 4, eax);
-}
-
 /*
 	// Pseudo-Code For the following Dynarec Implementations -->
@@ -169,39 +99,33 @@ namespace vtlb_private
 	// ------------------------------------------------------------------------
 	static void DynGen_DirectRead(u32 bits, bool sign)
 	{
-		pxAssert(bits == 8 || bits == 16 || bits == 32);
+		pxAssert(bits == 8 || bits == 16 || bits == 32 || bits == 64 || bits == 128);
 
 		switch (bits)
 		{
 			case 8:
 				if (sign)
-					xMOVSX(eax, ptr8[arg1reg]);
+					xMOVSX(rax, ptr8[arg1reg]);
 				else
-					xMOVZX(eax, ptr8[arg1reg]);
+					xMOVZX(rax, ptr8[arg1reg]);
 				break;
 
 			case 16:
 				if (sign)
-					xMOVSX(eax, ptr16[arg1reg]);
+					xMOVSX(rax, ptr16[arg1reg]);
 				else
-					xMOVZX(eax, ptr16[arg1reg]);
+					xMOVZX(rax, ptr16[arg1reg]);
 				break;
 
 			case 32:
-				xMOV(eax, ptr[arg1reg]);
+				if (sign)
+					xMOVSX(rax, ptr32[arg1reg]);
+				else
+					xMOV(eax, ptr32[arg1reg]);
 				break;
 
 			jNO_DEFAULT
 		}
 	}
 
-	static void DynGen_DirectRead64(u32 bits)
-	{
-		pxAssert(bits == 64 || bits == 128);
-
-		switch (bits) {
 			case 64:
-				xMOVQZX(xmm0, ptr64[arg1reg]);
+				xMOV(rax, ptr64[arg1reg]);
 				break;
 
 			case 128:
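The reads above now widen into the full 64-bit RAX so an EE register can be written back with a single move. In C++ terms, each narrow load is either sign-extended or zero-extended to 64 bits:

#include <cstdint>
#include <cstdio>
#include <cstring>

uint64_t load_extended(const void* src, unsigned bits, bool sign)
{
    switch (bits)
    {
        case 8:  { int8_t v;   std::memcpy(&v, src, sizeof(v));
                   return sign ? (uint64_t)(int64_t)v : (uint64_t)(uint8_t)v; }  // MOVSX/MOVZX
        case 16: { int16_t v;  std::memcpy(&v, src, sizeof(v));
                   return sign ? (uint64_t)(int64_t)v : (uint64_t)(uint16_t)v; } // MOVSX/MOVZX
        case 32: { int32_t v;  std::memcpy(&v, src, sizeof(v));
                   return sign ? (uint64_t)(int64_t)v : (uint64_t)(uint32_t)v; } // MOVSXD / MOV
        case 64: { uint64_t v; std::memcpy(&v, src, sizeof(v)); return v; }      // plain MOV
    }
    return 0;
}

int main()
{
    const int32_t v = -1;
    std::printf("0x%016llx\n", (unsigned long long)load_extended(&v, 32, true)); // all ones
}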
@@ -233,11 +157,11 @@ namespace vtlb_private
 				break;
 
 			case 64:
-				iMOV64_Smart(ptr[arg1reg], ptr[arg2reg]);
+				xMOV(ptr[arg1reg], arg2reg);
 				break;
 
 			case 128:
-				iMOV128_SSE(ptr[arg1reg], ptr[arg2reg]);
+				xMOVAPS(ptr[arg1reg], xmm1);
 				break;
 		}
 	}
@@ -257,7 +181,7 @@ alignas(__pagesize) static u8 m_IndirectDispatchers[__pagesize];
 //
 static u8* GetIndirectDispatcherPtr(int mode, int operandsize, int sign = 0)
 {
-	assert(mode || operandsize >= 2 ? !sign : true);
+	assert(mode || operandsize >= 3 ? !sign : true);
 
 	// Each dispatcher is aligned to 64 bytes.  The actual dispatchers are only like
 	// 20-some bytes each, but 64 byte alignment on functions that are called

@@ -268,7 +192,7 @@ static u8* GetIndirectDispatcherPtr(int mode, int operandsize, int sign = 0)
 	// Gregory: a 32 bytes alignment is likely enough and more cache friendly
 	const int A = 32;
 
-	return &m_IndirectDispatchers[(mode * (7 * A)) + (sign * 5 * A) + (operandsize * A)];
+	return &m_IndirectDispatchers[(mode * (8 * A)) + (sign * 5 * A) + (operandsize * A)];
 }
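Why the per-mode stride grows from 7*A to 8*A: signed dispatchers previously existed only for 8- and 16-bit reads (slot indices 5 and 6), but 32-bit reads now sign-extend into 64-bit GPRs too, so the signed block gains a third slot and the largest index becomes 7. A quick model of the slot layout, mirroring the index math above:

#include <cstdio>

int main()
{
    const int A = 32; // bytes per dispatcher slot, as in the function above
    for (int mode = 0; mode < 2; mode++)          // 0 = read, 1 = write
        for (int size = 0; size < 5; size++)      // 8,16,32,64,128 bits
            for (int sign = 0; sign < (!mode && size < 3 ? 2 : 1); sign++)
                std::printf("mode=%d size=%d sign=%d -> slot %d\n",
                            mode, size, sign, sign * 5 + size);
    // Unsigned slots occupy 0..4 and signed slots 5..7: eight slots per mode,
    // hence the mode stride of 8 * A.
}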
 // ------------------------------------------------------------------------

@@ -319,16 +243,21 @@ static void DynGen_IndirectTlbDispatcher(int mode, int bits, bool sign)
 		if (bits == 0)
 		{
 			if (sign)
-				xMOVSX(eax, al);
+				xMOVSX(rax, al);
 			else
-				xMOVZX(eax, al);
+				xMOVZX(rax, al);
 		}
 		else if (bits == 1)
 		{
 			if (sign)
-				xMOVSX(eax, ax);
+				xMOVSX(rax, ax);
 			else
-				xMOVZX(eax, ax);
+				xMOVZX(rax, ax);
 		}
+		else if (bits == 2)
+		{
+			if (sign)
+				xCDQE();
+		}
 	}

@@ -355,7 +284,7 @@ void vtlb_dynarec_init()
 	{
 		for (int bits = 0; bits < 5; ++bits)
 		{
-			for (int sign = 0; sign < (!mode && bits < 2 ? 2 : 1); sign++)
+			for (int sign = 0; sign < (!mode && bits < 3 ? 2 : 1); sign++)
 			{
 				xSetPtr(GetIndirectDispatcherPtr(mode, bits, !!sign));
 
|
@ -383,17 +312,18 @@ static void vtlb_SetWriteback(u32* writeback)
|
|||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Dynarec Load Implementations
|
||||
int vtlb_DynGenRead64(u32 bits, int gpr)
|
||||
int vtlb_DynGenReadQuad(u32 bits, int gpr)
|
||||
{
|
||||
pxAssume(bits == 64 || bits == 128);
|
||||
pxAssume(bits == 128);
|
||||
|
||||
u32* writeback = DynGen_PrepRegs();
|
||||
|
||||
int reg = gpr == -1 ? _allocTempXMMreg(XMMT_INT, 0) : _allocGPRtoXMMreg(0, gpr, MODE_WRITE); // Handler returns in xmm0
|
||||
const int reg = gpr == -1 ? _allocTempXMMreg(XMMT_INT, 0) : _allocGPRtoXMMreg(0, gpr, MODE_WRITE); // Handler returns in xmm0
|
||||
DynGen_IndirectDispatch(0, bits);
|
||||
DynGen_DirectRead64(bits);
|
||||
DynGen_DirectRead(bits, false);
|
||||
|
||||
vtlb_SetWriteback(writeback); // return target for indirect's call/ret
|
||||
|
||||
return reg;
|
||||
}
|
||||
|
||||
|
@ -401,13 +331,13 @@ int vtlb_DynGenRead64(u32 bits, int gpr)
|
|||
// Recompiled input registers:
|
||||
// ecx - source address to read from
|
||||
// Returns read value in eax.
|
||||
void vtlb_DynGenRead32(u32 bits, bool sign)
|
||||
void vtlb_DynGenReadNonQuad(u32 bits, bool sign)
|
||||
{
|
||||
pxAssume(bits <= 32);
|
||||
pxAssume(bits <= 64);
|
||||
|
||||
u32* writeback = DynGen_PrepRegs();
|
||||
|
||||
DynGen_IndirectDispatch(0, bits, sign && bits < 32);
|
||||
DynGen_IndirectDispatch(0, bits, sign && bits < 64);
|
||||
DynGen_DirectRead(bits, sign);
|
||||
|
||||
vtlb_SetWriteback(writeback);
|
||||
|
@@ -416,8 +346,10 @@ void vtlb_DynGenRead32(u32 bits, bool sign)
 // ------------------------------------------------------------------------
 // TLB lookup is performed in const, with the assumption that the COP0/TLB will clear the
 // recompiler if the TLB is changed.
-int vtlb_DynGenRead64_Const(u32 bits, u32 addr_const, int gpr)
+int vtlb_DynGenReadQuad_Const(u32 bits, u32 addr_const, int gpr)
 {
+	pxAssert(bits == 128);
+
 	EE::Profiler.EmitConstMem(addr_const);
 
 	int reg;

@@ -426,31 +358,14 @@ int vtlb_DynGenRead64_Const(u32 bits, u32 addr_const, int gpr)
 	{
 		void* ppf = reinterpret_cast<void*>(vmv.assumePtr(addr_const));
 		reg = gpr == -1 ? _allocTempXMMreg(XMMT_INT, -1) : _allocGPRtoXMMreg(-1, gpr, MODE_WRITE);
-		switch (bits)
-		{
-			case 64:
-				xMOVQZX(xRegisterSSE(reg), ptr64[ppf]);
-				break;
-
-			case 128:
-				xMOVAPS(xRegisterSSE(reg), ptr128[ppf]);
-				break;
-
-			jNO_DEFAULT
-		}
+		xMOVAPS(xRegisterSSE(reg), ptr128[ppf]);
 	}
 	else
 	{
 		// has to: translate, find function, call function
 		u32 paddr = vmv.assumeHandlerGetPAddr(addr_const);
 
-		int szidx = 0;
-		switch (bits)
-		{
-			case 64: szidx = 3; break;
-			case 128: szidx = 4; break;
-		}
-
+		const int szidx = 4;
 		iFlushCall(FLUSH_FULLVTLB);
 		reg = gpr == -1 ? _allocTempXMMreg(XMMT_INT, 0) : _allocGPRtoXMMreg(0, gpr, MODE_WRITE); // Handler returns in xmm0
 		xFastCall(vmv.assumeHandlerGetRaw(szidx, 0), paddr, arg2reg);
@@ -466,7 +381,7 @@ int vtlb_DynGenRead64_Const(u32 bits, u32 addr_const, int gpr)
 // TLB lookup is performed in const, with the assumption that the COP0/TLB will clear the
 // recompiler if the TLB is changed.
 //
-void vtlb_DynGenRead32_Const(u32 bits, bool sign, u32 addr_const)
+void vtlb_DynGenReadNonQuad_Const(u32 bits, bool sign, u32 addr_const)
 {
 	EE::Profiler.EmitConstMem(addr_const);
 

@@ -478,20 +393,27 @@ void vtlb_DynGenRead32_Const(u32 bits, bool sign, u32 addr_const)
 		{
 			case 8:
 				if (sign)
-					xMOVSX(eax, ptr8[(u8*)ppf]);
+					xMOVSX(rax, ptr8[(u8*)ppf]);
 				else
-					xMOVZX(eax, ptr8[(u8*)ppf]);
+					xMOVZX(rax, ptr8[(u8*)ppf]);
 				break;
 
 			case 16:
 				if (sign)
-					xMOVSX(eax, ptr16[(u16*)ppf]);
+					xMOVSX(rax, ptr16[(u16*)ppf]);
 				else
-					xMOVZX(eax, ptr16[(u16*)ppf]);
+					xMOVZX(rax, ptr16[(u16*)ppf]);
 				break;
 
 			case 32:
-				xMOV(eax, ptr32[(u32*)ppf]);
+				if (sign)
+					xMOVSX(rax, ptr32[(u32*)ppf]);
+				else
+					xMOV(eax, ptr32[(u32*)ppf]);
 				break;
+
+			case 64:
+				xMOV(rax, ptr64[(u64*)ppf]);
+				break;
 		}
 	}

@@ -506,6 +428,7 @@ void vtlb_DynGenRead32_Const(u32 bits, bool sign, u32 addr_const)
 			case 8: szidx = 0; break;
 			case 16: szidx = 1; break;
 			case 32: szidx = 2; break;
+			case 64: szidx = 3; break;
 		}
 
 		// Shortcut for the INTC_STAT register, which many games like to spin on heavily.
@@ -523,16 +446,21 @@ void vtlb_DynGenRead32_Const(u32 bits, bool sign, u32 addr_const)
 		if (bits == 8)
 		{
 			if (sign)
-				xMOVSX(eax, al);
+				xMOVSX(rax, al);
 			else
-				xMOVZX(eax, al);
+				xMOVZX(rax, al);
 		}
 		else if (bits == 16)
 		{
 			if (sign)
-				xMOVSX(eax, ax);
+				xMOVSX(rax, ax);
 			else
-				xMOVZX(eax, ax);
+				xMOVZX(rax, ax);
 		}
+		else if (bits == 32)
+		{
+			if (sign)
+				xCDQE();
+		}
 	}
 }

@@ -582,11 +510,11 @@ void vtlb_DynGenWrite_Const(u32 bits, u32 addr_const)
 				break;
 
 			case 64:
-				iMOV64_Smart(ptr[(void*)ppf], ptr[arg2reg]);
+				xMOV(ptr64[(void*)ppf], arg2reg);
 				break;
 
 			case 128:
-				iMOV128_SSE(ptr[(void*)ppf], ptr[arg2reg]);
+				xMOVAPS(ptr128[(void*)ppf], xmm1);
 				break;
 		}
 	}
@@ -20,6 +20,7 @@
 
 #include "common/AlignedMalloc.h"
 #include "common/Perf.h"
+#include "common/StringUtil.h"
 
 //------------------------------------------------------------------
 // Micro VU - Main Functions

@@ -36,15 +37,12 @@ static __fi void mVUthrowHardwareDeficiency(const char* extFail, int vuIndex)
 
 void mVUreserveCache(microVU& mVU)
 {
-	mVU.cache_reserve = new RecompiledCodeReserve(fmt::format("Micro VU{} Recompiler Cache", mVU.index), _16mb);
-	mVU.cache_reserve->SetProfilerName(fmt::format("mVU{}rec", mVU.index));
+	mVU.cache_reserve = new RecompiledCodeReserve(StringUtil::StdStringFromFormat("Micro VU%u Recompiler Cache", mVU.index));
+	mVU.cache_reserve->SetProfilerName(StringUtil::StdStringFromFormat("mVU%urec", mVU.index));
 
-	mVU.cache = mVU.index
-		? (u8*)mVU.cache_reserve->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::mVU1recOffset, mVU.cacheSize * _1mb)
-		: (u8*)mVU.cache_reserve->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::mVU0recOffset, mVU.cacheSize * _1mb);
-
-	mVU.cache_reserve->ThrowIfNotOk();
+	const size_t alloc_offset = mVU.index ? HostMemoryMap::mVU0recOffset : HostMemoryMap::mVU1recOffset;
+	mVU.cache_reserve->Assign(GetVmMemory().CodeMemory(), alloc_offset, mVU.cacheSize * _1mb);
+	mVU.cache = mVU.cache_reserve->GetPtr();
 }
 
 // Only run this once per VU! ;)
@@ -72,7 +72,6 @@ struct nVifStruct
 	nVifStruct() = default;
 };
 
-extern void closeNewVif(int idx);
 extern void resetNewVif(int idx);
 extern void releaseNewVif(int idx);
@@ -21,6 +21,7 @@
 #include "newVif_UnpackSSE.h"
 #include "MTVU.h"
 #include "common/Perf.h"
+#include "common/StringUtil.h"
 #include "fmt/core.h"
 
 static void recReset(int idx)

@@ -34,11 +35,12 @@ static void recReset(int idx)
 
 void dVifReserve(int idx)
 {
-	if (!nVif[idx].recReserve)
-		nVif[idx].recReserve = new RecompiledCodeReserve(fmt::format("VIF{} Unpack Recompiler Cache", idx), _8mb);
-
-	auto offset = idx ? HostMemoryMap::VIF1recOffset : HostMemoryMap::VIF0recOffset;
-	nVif[idx].recReserve->Reserve(GetVmMemory().MainMemory(), offset, 8 * _1mb);
+	if (nVif[idx].recReserve)
+		return;
+
+	const size_t offset = idx ? HostMemoryMap::VIF1recOffset : HostMemoryMap::VIF0recOffset;
+	nVif[idx].recReserve = new RecompiledCodeReserve(StringUtil::StdStringFromFormat("VIF%u Unpack Recompiler Cache", idx));
+	nVif[idx].recReserve->Assign(GetVmMemory().CodeMemory(), offset, 8 * _1mb);
 }
 
 void dVifReset(int idx)
|
@ -88,10 +88,6 @@ void resetNewVif(int idx)
|
|||
dVifReset(idx);
|
||||
}
|
||||
|
||||
void closeNewVif(int idx)
|
||||
{
|
||||
}
|
||||
|
||||
void releaseNewVif(int idx)
|
||||
{
|
||||
}
|
||||
|
|
|
@@ -345,12 +345,9 @@ void VifUnpackSSE_Init()
 
 	DevCon.WriteLn("Generating SSE-optimized unpacking functions for VIF interpreters...");
 
-	nVifUpkExec = new RecompiledCodeReserve("VIF SSE-optimized Unpacking Functions", _64kb);
+	nVifUpkExec = new RecompiledCodeReserve("VIF SSE-optimized Unpacking Functions");
 	nVifUpkExec->SetProfilerName("iVIF-SSE");
-	nVifUpkExec->Reserve(GetVmMemory().BumpAllocator(), _64kb);
-
-	nVifUpkExec->ThrowIfNotOk();
-
+	nVifUpkExec->Assign(GetVmMemory().CodeMemory(), HostMemoryMap::VIFUnpackRecOffset, _1mb);
 	xSetPtr(*nVifUpkExec);
 
 	for (int a = 0; a < 2; a++)

@@ -364,7 +361,7 @@ void VifUnpackSSE_Init()
 	DevCon.Indent().WriteLn(
 		"Reserved buffer    : %u bytes @ 0x%016" PRIXPTR "\n"
 		"x86 code generated : %u bytes\n",
-		(uint)nVifUpkExec->GetCommittedBytes(),
+		(uint)nVifUpkExec->GetSize(),
 		nVifUpkExec->GetPtr(),
 		(uint)(xGetPtr() - nVifUpkExec->GetPtr())
 	);