Rewritten most of kernel memory functions (WIP)

This commit is contained in:
ergo720 2017-12-11 21:18:40 +01:00
parent 6576933e34
commit 6a85203452
8 changed files with 210 additions and 95 deletions

View File

@ -1005,7 +1005,7 @@ void *Xbe::FindSection(char *zsSectionName)
return NULL;
}
void Xbe::PurgeBadChar(std::string &s, const std::string &illegalChars)
void Xbe::PurgeBadChar(std::string& s, const std::string& illegalChars)
{
for (auto it = s.begin(); it < s.end(); ++it)
{

View File

@ -331,7 +331,7 @@ void *CxbxRestoreContiguousMemory(char *szFilePath_memory_bin)
FILE_MAP_READ | FILE_MAP_WRITE | FILE_MAP_EXECUTE,
/* dwFileOffsetHigh */0,
/* dwFileOffsetLow */0,
CONTIGUOUS_MEMORY_SIZE,
CONTIGUOUS_MEMORY_CHIHIRO_SIZE,
(void *)CONTIGUOUS_MEMORY_BASE);
if (memory != (void *)CONTIGUOUS_MEMORY_BASE)
{
@ -343,11 +343,11 @@ void *CxbxRestoreContiguousMemory(char *szFilePath_memory_bin)
}
printf("[0x%.4X] INIT: Mapped %d MiB of Xbox contiguous memory at 0x%.8X to 0x%.8X\n",
GetCurrentThreadId(), CONTIGUOUS_MEMORY_SIZE / ONE_MB, CONTIGUOUS_MEMORY_BASE, CONTIGUOUS_MEMORY_BASE + CONTIGUOUS_MEMORY_SIZE - 1);
GetCurrentThreadId(), CONTIGUOUS_MEMORY_CHIHIRO_SIZE / ONE_MB, CONTIGUOUS_MEMORY_BASE, CONTIGUOUS_MEMORY_BASE + CONTIGUOUS_MEMORY_CHIHIRO_SIZE - 1);
if (NeedsInitialization)
{
memset(memory, 0, CONTIGUOUS_MEMORY_SIZE);
memset(memory, 0, CONTIGUOUS_MEMORY_CHIHIRO_SIZE);
printf("[0x%.4X] INIT: Initialized contiguous memory\n", GetCurrentThreadId());
}
else
@ -359,7 +359,7 @@ void *CxbxRestoreContiguousMemory(char *szFilePath_memory_bin)
FILE_MAP_READ | FILE_MAP_WRITE | FILE_MAP_EXECUTE,
/* dwFileOffsetHigh */0,
/* dwFileOffsetLow */0,
TILED_MEMORY_SIZE,
TILED_MEMORY_CHIHIRO_SIZE,
(void *)TILED_MEMORY_BASE);
if (tiled_memory != (void *)TILED_MEMORY_BASE)
{
@ -371,7 +371,7 @@ void *CxbxRestoreContiguousMemory(char *szFilePath_memory_bin)
}
printf("[0x%.4X] INIT: Mapped contiguous memory to Xbox tiled memory at 0x%.8X to 0x%.8X\n",
GetCurrentThreadId(), TILED_MEMORY_BASE, TILED_MEMORY_BASE + TILED_MEMORY_SIZE - 1);
GetCurrentThreadId(), TILED_MEMORY_BASE, TILED_MEMORY_BASE + TILED_MEMORY_CHIHIRO_SIZE - 1);
// Initialize the virtual memory manager :
g_VMManager.Initialize(hFileMapping);
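Note that the hunk above now maps the full 128 MiB Chihiro-sized window unconditionally and leaves it to the VM manager to carve out the retail 64 MiB layout. A minimal sketch of how the window sizes could instead be picked per console type, reusing the g_bIsChihiro flag this commit already checks in EmuKrnlMm and EmuNV2A (illustrative only, not part of the diff):
// Sketch only: select the contiguous/tiled window sizes by console type.
const size_t contiguous_size = g_bIsChihiro ? CONTIGUOUS_MEMORY_CHIHIRO_SIZE  // 128 MiB
                                            : CONTIGUOUS_MEMORY_XBOX_SIZE;    // 64 MiB
const size_t tiled_size      = g_bIsChihiro ? TILED_MEMORY_CHIHIRO_SIZE
                                            : TILED_MEMORY_XBOX_SIZE;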
@ -904,7 +904,7 @@ __declspec(noreturn) void CxbxKrnlInit
// Assign the running Xbe path, so it can be accessed via the kernel thunk 'XeImageFileName' :
xboxkrnl::XeImageFileName.MaximumLength = MAX_PATH;
xboxkrnl::XeImageFileName.Buffer = (PCHAR)g_VMManager.MapMemoryBlock(MAX_PATH, 0, ULONG_MAX);
xboxkrnl::XeImageFileName.Buffer = (PCHAR)g_VMManager.MapMemoryBlock(MAX_PATH, 0, MAXULONG_PTR);
sprintf(xboxkrnl::XeImageFileName.Buffer, "%c:\\%s", CxbxDefaultXbeDriveLetter, fileName.c_str());
xboxkrnl::XeImageFileName.Length = (USHORT)strlen(xboxkrnl::XeImageFileName.Buffer);
printf("[0x%.4X] INIT: XeImageFileName = %s\n", GetCurrentThreadId(), xboxkrnl::XeImageFileName.Buffer);

View File

@ -67,14 +67,17 @@ extern "C" {
// Define virtual base addresses for physical memory windows.
#define MM_SYSTEM_PHYSICAL_MAP KSEG0_BASE // = 0x80000000
#define MM_HIGHEST_PHYSICAL_PAGE 0x07FFF
#define MM_XBOX_HIGHEST_PHYSICAL_PAGE 0x03FFF
#define MM_CHIHIRO_HIGHEST_PHYSICAL_PAGE 0x07FFF
#define MM_64M_PHYSICAL_PAGE 0x04000
#define MM_INSTANCE_PHYSICAL_PAGE 0x03FE0 // Chihiro arcade should use 0x07FF0
#define MM_INSTANCE_PAGE_COUNT 16
#define CONTIGUOUS_MEMORY_BASE MM_SYSTEM_PHYSICAL_MAP // = 0x80000000
#define CONTIGUOUS_MEMORY_SIZE (64 * ONE_MB)
#define CONTIGUOUS_MEMORY_XBOX_SIZE (64 * ONE_MB)
#define CONTIGUOUS_MEMORY_CHIHIRO_SIZE (128 * ONE_MB)
#define TILED_MEMORY_BASE 0xF0000000 // Tiled memory is a mirror of contiguous memory, residing at 0xF0000000
#define TILED_MEMORY_SIZE CONTIGUOUS_MEMORY_SIZE
#define TILED_MEMORY_XBOX_SIZE CONTIGUOUS_MEMORY_XBOX_SIZE
#define TILED_MEMORY_CHIHIRO_SIZE CONTIGUOUS_MEMORY_CHIHIRO_SIZE
#define NV2A_MEMORY_BASE 0xFD000000 // See NV2A_ADDR
#define NV2A_MEMORY_SIZE 0x01000000 // See NV2A_SIZE
#define NV2A_PRAMIN_ADDR 0xFD700000
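Splitting the old MM_HIGHEST_PHYSICAL_PAGE define into retail and Chihiro variants encodes the two RAM sizes. A quick sanity sketch (not part of the diff), assuming the usual 4 KiB pages:
// Sketch: the page-frame constants above expressed as sizes, assuming 4 KiB pages.
static_assert(((MM_XBOX_HIGHEST_PHYSICAL_PAGE + 1) << 12) == CONTIGUOUS_MEMORY_XBOX_SIZE,
              "retail Xbox: 64 MiB of physical memory");
static_assert(((MM_CHIHIRO_HIGHEST_PHYSICAL_PAGE + 1) << 12) == CONTIGUOUS_MEMORY_CHIHIRO_SIZE,
              "Chihiro/devkit: 128 MiB of physical memory");
static_assert((MM_64M_PHYSICAL_PAGE << 12) == 64 * ONE_MB,
              "MM_64M_PHYSICAL_PAGE is the first page above the retail 64 MiB");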

View File

@ -30,6 +30,7 @@
// *
// * (c) 2002-2003 Aaron Robinson <caustik@caustik.com>
// * (c) 2016 Patrick van Logchem <pvanlogchem@gmail.com>
// * (c) 2017 ergo720
// *
// * All rights reserved
// *
@ -50,7 +51,7 @@ namespace xboxkrnl
#include "EmuKrnlLogging.h"
#include "CxbxKrnl.h" // For CxbxKrnlCleanup
#include "Emu.h" // For EmuWarning()
#include "MemoryManager.h"
#include "VMManager.h"
// prevent name collisions
namespace NtDll
@ -114,6 +115,10 @@ XBSYSAPI EXPORTNUM(166) xboxkrnl::PVOID NTAPI xboxkrnl::MmAllocateContiguousMemo
PVOID pRet = (PVOID)1; // Marker, never returned, overwritten with NULL on input error
// size must be > 0
if (NumberOfBytes == 0)
pRet = NULL;
if (Alignment < PAGE_SIZE)
Alignment = PAGE_SIZE; // page boundary at least
@ -133,7 +138,7 @@ XBSYSAPI EXPORTNUM(166) xboxkrnl::PVOID NTAPI xboxkrnl::MmAllocateContiguousMemo
if (pRet != NULL)
{
// TODO : Allocate differently if(ProtectionType & PAGE_WRITECOMBINE)
pRet = g_MemoryManager.AllocateContiguous(NumberOfBytes, Alignment);
pRet = (PVOID)g_VMManager.Allocate(NumberOfBytes, LowestAcceptableAddress, HighestAcceptableAddress, NULL, Alignment, ProtectionType);
}
RETURN(pRet);
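Under the new scheme a contiguous allocation is just a constrained VMManager::Allocate call; write-combined requests are still a TODO per the comment above. A hypothetical example of the resulting call (illustrative, not code from this commit):
// Hypothetical example (sketch): a title asking for two pages anywhere below the
// retail 64 MiB mark would, per the hunk above, end up as:
VAddr buf = g_VMManager.Allocate(
    /*size*/      2 * PAGE_SIZE,
    /*low_addr*/  0,
    /*high_addr*/ (64 * ONE_MB) - 1,
    /*addr*/      NULL,
    /*Alignment*/ PAGE_SIZE,
    /*protect*/   PAGE_READWRITE);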
@ -153,8 +158,8 @@ XBSYSAPI EXPORTNUM(167) xboxkrnl::PVOID NTAPI xboxkrnl::MmAllocateSystemMemory
LOG_FUNC_ARG(Protect)
LOG_FUNC_END;
// TODO: should this be aligned?
PVOID pRet = g_MemoryManager.Allocate(NumberOfBytes);
// TODO: this should probably allocate the memory at a specific system virtual address region...
PVOID pRet = (PVOID)g_VMManager.Allocate(NumberOfBytes, 0, MAXULONG_PTR, NULL, PAGE_SIZE, Protect);
RETURN(pRet);
}
@ -173,8 +178,12 @@ XBSYSAPI EXPORTNUM(168) xboxkrnl::PVOID NTAPI xboxkrnl::MmClaimGpuInstanceMemory
LOG_FUNC_ARG_OUT(NumberOfPaddingBytes)
LOG_FUNC_END;
unsigned int highest_physical_page = MM_XBOX_HIGHEST_PHYSICAL_PAGE;
if (g_bIsChihiro)
{
*NumberOfPaddingBytes = 0;
highest_physical_page = MM_CHIHIRO_HIGHEST_PHYSICAL_PAGE;
}
else
*NumberOfPaddingBytes = MI_CONVERT_PFN_TO_PHYSICAL(MM_64M_PHYSICAL_PAGE) -
MI_CONVERT_PFN_TO_PHYSICAL(MM_INSTANCE_PHYSICAL_PAGE + MM_INSTANCE_PAGE_COUNT);
@ -189,7 +198,7 @@ XBSYSAPI EXPORTNUM(168) xboxkrnl::PVOID NTAPI xboxkrnl::MmClaimGpuInstanceMemory
}
#endif
PVOID Result = (PUCHAR)MI_CONVERT_PFN_TO_PHYSICAL(MM_HIGHEST_PHYSICAL_PAGE + 1)
PVOID Result = (PUCHAR)MI_CONVERT_PFN_TO_PHYSICAL(highest_physical_page + 1)
- *NumberOfPaddingBytes;
RETURN(Result);
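For reference, the retail padding computed above works out to 64 KiB: the gap between the end of the 16-page GPU instance area and the 64 MiB boundary. A worked sketch, assuming MI_CONVERT_PFN_TO_PHYSICAL is the usual page-frame-number to byte-offset conversion (pfn * PAGE_SIZE); PfnToPhysical is a stand-in for that assumed behaviour:
// Worked check (sketch only).
constexpr unsigned int PfnToPhysical(unsigned int pfn) { return pfn * 0x1000; }
static_assert(PfnToPhysical(MM_64M_PHYSICAL_PAGE)
            - PfnToPhysical(MM_INSTANCE_PHYSICAL_PAGE + MM_INSTANCE_PAGE_COUNT) == 64 * 1024,
              "retail: 64 KiB of padding below the 64 MiB boundary");
// On Chihiro the padding is zero and the instance memory sits just below the 128 MiB top instead.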
@ -211,39 +220,19 @@ XBSYSAPI EXPORTNUM(169) xboxkrnl::PVOID NTAPI xboxkrnl::MmCreateKernelStack
LOG_FUNC_ARG(DebuggerThread)
LOG_FUNC_END;
NtDll::PVOID BaseAddress = NULL;
if (!NumberOfBytes) {
// NumberOfBytes cannot be zero when passed to NtAllocateVirtualMemory() below
CxbxKrnlCleanup("Assertion: 'NumberOfBytes != 0' in MmCreateKernelStack()");
}
if (NumberOfBytes & 0xFF) {
// Validate NumberOfBytes for alignment with the page size
CxbxKrnlCleanup("Assertion: '(NumberOfBytes & (PAGE_SIZE -1)) == 0' in MmCreateKernelStack()");
}
VAddr addr = NULL;
/**
* Function at present does not:
* - Create an additional guard PAGE_SIZE after allocation,
* - Fill allocation with any values
* - Treat DebuggerThread any differently
*/
NTSTATUS ret = NtDll::NtAllocateVirtualMemory(
/*ProcessHandle=*/g_CurrentProcessHandle,
/*BaseAddress=*/&BaseAddress,
/*ZeroBits=*/0,
/*RegionSize=*/&NumberOfBytes,
/*AllocationType=*/MEM_COMMIT,
/*Protect=*/PAGE_READWRITE);
if (NumberOfBytes)
{
addr = g_VMManager.AllocateStack(NumberOfBytes);
}
if (FAILED(ret))
EmuWarning("MmCreateKernelStack failed!");
else
BaseAddress = (PVOID)((ULONG)BaseAddress + NumberOfBytes);
RETURN(BaseAddress);
RETURN((PVOID)addr);
}
// ******************************************************************
@ -260,16 +249,7 @@ XBSYSAPI EXPORTNUM(170) xboxkrnl::VOID NTAPI xboxkrnl::MmDeleteKernelStack
LOG_FUNC_ARG(BaseAddress)
LOG_FUNC_END;
// TODO : Untested
ULONG RegionSize = 0;
NTSTATUS ret = NtDll::NtFreeVirtualMemory(
/*ProcessHandle=*/g_CurrentProcessHandle,
&BaseAddress,
&RegionSize,
/*FreeType=*/MEM_RELEASE);
if (FAILED(ret))
EmuWarning("MmDeleteKernelStack failed!");
g_VMManager.DeallocateStack((VAddr)BaseAddress);
}
// ******************************************************************
@ -290,8 +270,7 @@ XBSYSAPI EXPORTNUM(171) xboxkrnl::VOID NTAPI xboxkrnl::MmFreeContiguousMemory
LOG_IGNORED();
return;
}
g_MemoryManager.Free(BaseAddress);
g_VMManager.Deallocate((VAddr)BaseAddress);
// TODO -oDxbx: Sokoban crashes after this, at reset time (press Black + White to hit this).
// Tracing in assembly shows the crash takes place quite a while further, so it's probably
@ -313,7 +292,7 @@ XBSYSAPI EXPORTNUM(172) xboxkrnl::NTSTATUS NTAPI xboxkrnl::MmFreeSystemMemory
LOG_FUNC_ARG(NumberOfBytes)
LOG_FUNC_END;
g_MemoryManager.Free(BaseAddress);
g_VMManager.Deallocate((VAddr)BaseAddress);
RETURN(STATUS_SUCCESS);
}
@ -335,8 +314,9 @@ XBSYSAPI EXPORTNUM(173) xboxkrnl::PHYSICAL_ADDRESS NTAPI xboxkrnl::MmGetPhysical
// MmLockUnlockBufferPages, emulate this???
LOG_INCOMPLETE();
// We emulate Virtual/Physical memory 1:1
return (PHYSICAL_ADDRESS)BaseAddress;
PHYSICAL_ADDRESS addr = g_VMManager.TranslateVAddr((VAddr)BaseAddress);
return addr;
}
// ******************************************************************
@ -349,9 +329,14 @@ XBSYSAPI EXPORTNUM(174) xboxkrnl::BOOLEAN NTAPI xboxkrnl::MmIsAddressValid
{
LOG_FUNC_ONE_ARG_OUT(VirtualAddress);
LOG_UNIMPLEMENTED();
BOOLEAN Ret = FALSE;
RETURN(TRUE);
if (g_VMManager.QueryVAddr((VAddr)VirtualAddress))
{
Ret = TRUE;
}
RETURN(Ret);
}
// ******************************************************************
@ -370,6 +355,7 @@ XBSYSAPI EXPORTNUM(175) xboxkrnl::VOID NTAPI xboxkrnl::MmLockUnlockBufferPages
LOG_FUNC_ARG(Protect)
LOG_FUNC_END;
// REMARK: all the pages inside the main memory pool are non-relocatable, so for the moment this function is a no-op
LOG_IGNORED();
}
@ -387,6 +373,7 @@ XBSYSAPI EXPORTNUM(176) xboxkrnl::VOID NTAPI xboxkrnl::MmLockUnlockPhysicalPage
LOG_FUNC_ARG(UnlockPage)
LOG_FUNC_END;
// REMARK: all the pages inside the main memory pool are non-relocatable, so for the moment this function is a no-op
LOG_IGNORED();
}
@ -422,8 +409,7 @@ XBSYSAPI EXPORTNUM(177) xboxkrnl::PVOID NTAPI xboxkrnl::MmMapIoSpace
pRet = (PVOID)PhysicalAddress;
}
else {
// TODO: should this be aligned?
pRet = g_MemoryManager.Allocate(NumberOfBytes);
pRet = (PVOID)g_VMManager.Allocate(NumberOfBytes, 0, MAXULONG_PTR, NULL, PAGE_SIZE, ProtectionType);
LOG_INCOMPLETE();
}
@ -595,12 +581,7 @@ XBSYSAPI EXPORTNUM(182) xboxkrnl::VOID NTAPI xboxkrnl::MmSetAddressProtect
LOG_FUNC_ARG(NewProtect)
LOG_FUNC_END;
DWORD dwOldProtect;
if (!VirtualProtect(BaseAddress, NumberOfBytes, NewProtect & (~PAGE_WRITECOMBINE), &dwOldProtect))
EmuWarning("VirtualProtect Failed!");
DbgPrintf("KRNL: VirtualProtect was 0x%.8X -> 0x%.8X\n", dwOldProtect, NewProtect & (~PAGE_WRITECOMBINE));
g_VMManager.Protect((VAddr)BaseAddress, NumberOfBytes, NewProtect);
}
// ******************************************************************
@ -624,7 +605,7 @@ XBSYSAPI EXPORTNUM(183) xboxkrnl::NTSTATUS NTAPI xboxkrnl::MmUnmapIoSpace
// Don't free hardware devices (flash, NV2A, etc)
}
else {
g_MemoryManager.Free(BaseAddress);
g_VMManager.Deallocate((VAddr)BaseAddress);
LOG_INCOMPLETE();
}

View File

@ -91,7 +91,7 @@ XBSYSAPI EXPORTNUM(327) xboxkrnl::NTSTATUS NTAPI xboxkrnl::XeLoadSection
// Copy the section data
memcpy(Section->VirtualAddress, sectionData, Section->FileSize);
// Make this loading consume physical memory as well
g_VMManager.MapMemoryBlock(Section->FileSize, 0, ULONG_MAX, (VAddr)Section->VirtualAddress);
g_VMManager.MapMemoryBlock(Section->FileSize, 0, MAXULONG_PTR, (VAddr)Section->VirtualAddress);
}
// Increment the reference count

View File

@ -2759,12 +2759,14 @@ DEVICE_WRITE32(PRMVIO)
DEVICE_READ32(PFB)
{
unsigned int contiguous_memory_size = g_bIsChihiro ? CONTIGUOUS_MEMORY_CHIHIRO_SIZE : CONTIGUOUS_MEMORY_XBOX_SIZE;
DEVICE_READ32_SWITCH() {
case NV_PFB_CFG0:
result = 3; // = NV_PFB_CFG0_PART_4
break;
case NV_PFB_CSTATUS:
result = CONTIGUOUS_MEMORY_SIZE;
result = contiguous_memory_size;
break;
case NV_PFB_WBC:
result = 0; // = !NV_PFB_WBC_FLUSH

View File

@ -148,14 +148,14 @@ void VMManager::Initialize(HANDLE file_view)
UpdatePageTableForVMA(dummy_kernel_vma);
// Map the contiguous memory
VMAIter contiguous_memory_vma_handle = CarveVMA(CONTIGUOUS_MEMORY_BASE, CONTIGUOUS_MEMORY_SIZE);
VMAIter contiguous_memory_vma_handle = CarveVMA(CONTIGUOUS_MEMORY_BASE, CONTIGUOUS_MEMORY_XBOX_SIZE);
VirtualMemoryArea& contiguous_memory_vma = contiguous_memory_vma_handle->second;
contiguous_memory_vma.type = VMAType::MemContiguous;
MergeAdjacentVMA(contiguous_memory_vma_handle);
UpdatePageTableForVMA(contiguous_memory_vma);
// Map the tiled memory
VMAIter tiled_memory_vma_handle = CarveVMA(TILED_MEMORY_BASE, TILED_MEMORY_SIZE);
VMAIter tiled_memory_vma_handle = CarveVMA(TILED_MEMORY_BASE, TILED_MEMORY_XBOX_SIZE);
VirtualMemoryArea& tiled_memory_vma = tiled_memory_vma_handle->second;
tiled_memory_vma.type = VMAType::MemTiled;
MergeAdjacentVMA(tiled_memory_vma_handle);
@ -251,6 +251,24 @@ void VMManager::InitializeChihiro()
m_Vma_map.erase(MCPX_BASE);
m_Vma_map.erase(BIOS_BASE);
// Map the contiguous memory
m_Vma_map[CONTIGUOUS_MEMORY_BASE].type = VMAType::Free;
MergeAdjacentVMA(GetVMAIterator(CONTIGUOUS_MEMORY_BASE));
VMAIter contiguous_memory_vma_handle = CarveVMA(CONTIGUOUS_MEMORY_BASE, CONTIGUOUS_MEMORY_CHIHIRO_SIZE);
VirtualMemoryArea& contiguous_memory_vma = contiguous_memory_vma_handle->second;
contiguous_memory_vma.type = VMAType::MemContiguous;
MergeAdjacentVMA(contiguous_memory_vma_handle);
UpdatePageTableForVMA(contiguous_memory_vma);
// Map the tiled memory
m_Vma_map[TILED_MEMORY_BASE].type = VMAType::Free;
MergeAdjacentVMA(GetVMAIterator(TILED_MEMORY_BASE));
VMAIter tiled_memory_vma_handle = CarveVMA(TILED_MEMORY_BASE, TILED_MEMORY_CHIHIRO_SIZE);
VirtualMemoryArea& tiled_memory_vma = tiled_memory_vma_handle->second;
tiled_memory_vma.type = VMAType::MemTiled;
MergeAdjacentVMA(tiled_memory_vma_handle);
UpdatePageTableForVMA(tiled_memory_vma);
// Map the bios
VMAIter bios_vma_handle = CarveVMA(BIOS_BASE, BIOS_CHIHIRO_SIZE);
VirtualMemoryArea& bios_vma = bios_vma_handle->second;
@ -275,19 +293,87 @@ void VMManager::VMStatistics() const
}
}
VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr)
VAddr VMManager::Allocate(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr, ULONG Alignment, DWORD protect)
{
Lock();
VAddr ret = MapMemoryBlock(size, low_addr, high_addr, addr, Alignment);
if (ret)
{
ReprotectVMARange(ret, size, protect);
size_t aligned_size = (PAGE_MASK & size) ? ((size + PAGE_SIZE) & ~PAGE_MASK) : size;
protect & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) ?
ImageMemoryInUse += aligned_size : NonImageMemoryInUse += aligned_size;
}
Unlock();
return ret;
}
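The aligned_size expression above rounds a byte count up to the next page boundary before it is charged to the image or non-image counters. A small worked sketch, with the page size written out as 0x1000 for clarity:
// Sketch of the rounding used above.
constexpr size_t RoundUpToPage(size_t size)
{
    return (size & 0xFFF) ? ((size + 0x1000) & ~size_t(0xFFF)) : size;
}
static_assert(RoundUpToPage(0x1234) == 0x2000, "partial pages round up");
static_assert(RoundUpToPage(0x2000) == 0x2000, "aligned sizes are unchanged");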
VAddr VMManager::AllocateStack(size_t size)
{
Lock();
VAddr addr = MapMemoryBlock(size + PAGE_SIZE, 0, MAXULONG_PTR);
if (addr)
{
ReprotectVMARange(addr, PAGE_SIZE, PAGE_NOACCESS);
size_t aligned_size = (PAGE_MASK & size) ? ((size + PAGE_SIZE) & ~PAGE_MASK) : size;
addr = addr + PAGE_SIZE + aligned_size;
StackMemoryInUse += aligned_size;
}
Unlock();
return addr;
}
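AllocateStack over-allocates by one page, turns that lowest page into a PAGE_NOACCESS guard, and returns the address just past the top, since x86 stacks grow downwards. A sketch of the resulting layout (illustrative comment only, not code from the commit):
// Layout produced by AllocateStack(size), with 'base' being what MapMemoryBlock
// returned and 'size' already page-aligned:
//
//   base              base + PAGE_SIZE                       base + PAGE_SIZE + size
//   +-----------------+---------------------------------------+
//   | guard page      |             usable stack              |
//   | (PAGE_NOACCESS) |  (grows downwards toward the guard)    |
//   +-----------------+---------------------------------------+
//                                                             ^ value returned to the caller
// DeallocateStack later re-protects the guard page before unmapping the whole block.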
void VMManager::Deallocate(VAddr addr)
{
Lock();
UnmapRange(addr);
Unlock();
}
void VMManager::DeallocateStack(VAddr addr)
{
Lock();
ReprotectVMARange(addr, PAGE_SIZE, PAGE_EXECUTE_READWRITE);
UnmapRange(addr, true);
Unlock();
}
void VMManager::Protect(VAddr target, size_t size, DWORD new_perms)
{
Lock();
ReprotectVMARange(target, size, new_perms & (~PAGE_WRITECOMBINE));
Unlock();
}
bool VMManager::QueryVAddr(VAddr addr)
{
Lock();
bool ret = IsValidVirtualAddress(addr);
Unlock();
return ret;
}
PAddr VMManager::TranslateVAddr(VAddr addr)
{
Lock();
PAddr p_addr = TranslateVAddrToPAddr(addr);
Unlock();
return p_addr;
}
VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr, ULONG Alignment)
{
// Align the allocation size to the next page boundary and find a free memory block, if any
u32 offset;
size_t aligned_size = (PAGE_MASK & size) ? ((size + PAGE_SIZE) & ~PAGE_MASK) : size;
if (high_addr == ULONG_MAX)
if (high_addr == MAXULONG_PTR)
{
offset = AllocatePhysicalMemory(aligned_size);
}
else
{
PAddr aligned_low = low_addr & ~(UINT_PTR)PAGE_MASK;
PAddr aligned_high = high_addr & ~(UINT_PTR)PAGE_MASK;
PAddr aligned_low = low_addr & ~(UINT_PTR)(Alignment - 1);
PAddr aligned_high = high_addr & ~(UINT_PTR)(Alignment - 1);
if (aligned_high > m_MaxContiguousAddress) { aligned_high = m_MaxContiguousAddress; }
if (aligned_low > aligned_high) { aligned_low = aligned_high - PAGE_SIZE; }
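When the caller constrains the physical range, the bounds are first snapped down to the requested alignment and then clamped against the top of contiguous memory; if the window collapses, it shrinks to the single page below the high bound. Two spot checks of the masking (sketch, Alignment == 0x1000):
// Sketch of the bound snapping above, for Alignment == 0x1000:
static_assert((0x00000234u & ~(UINT_PTR)(0x1000 - 1)) == 0x00000000u, "low bound snaps down to 0");
static_assert((0x04000123u & ~(UINT_PTR)(0x1000 - 1)) == 0x04000000u, "high bound snaps down, then gets clamped");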
@ -353,10 +439,10 @@ VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VA
return addr;
}
void VMManager::UnmapRange(VAddr target)
void VMManager::UnmapRange(VAddr target, bool StackFlag)
{
VAddr aligned_start = target & ~(UINT_PTR)PAGE_MASK;
if (aligned_start == 0) // forbidden pages
if (aligned_start == 0) // forbidden page
{
// This should also generate a STATUS_GUARD_PAGE_VIOLATION exception from Windows
CxbxKrnlCleanup("Access to guarded page 0x0 detected!\n");
@ -392,7 +478,16 @@ The type was %u", it->second.type);
default:
{
size_t aligned_size = it->second.size;
if (it->second.type == VMAType::Allocated || it->second.type == VMAType::Xbe)
if (StackFlag)
{
StackMemoryInUse -= aligned_size;
}
else
{
it->second.permissions & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) ?
ImageMemoryInUse -= aligned_size : NonImageMemoryInUse -= aligned_size;
}
if (it->second.type == VMAType::Allocated)
{
DeAllocatePhysicalMemory(it->second.backing_block);
}
@ -452,6 +547,16 @@ PAddr VMManager::TranslateVAddrToPAddr(const VAddr addr)
return NULL;
}
void VMManager::Lock()
{
EnterCriticalSection(&m_CriticalSection);
}
void VMManager::Unlock()
{
LeaveCriticalSection(&m_CriticalSection);
}
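Every public VMManager entry point brackets its body in Lock()/Unlock(). A scoped guard over the same critical section would keep the pair balanced even on early returns; a sketch of the idea (not part of this commit):
// Sketch only: RAII wrapper around the same CRITICAL_SECTION, so an early return
// cannot leave the lock held.
struct VMLockGuard
{
    explicit VMLockGuard(CRITICAL_SECTION& cs) : m_cs(cs) { EnterCriticalSection(&m_cs); }
    ~VMLockGuard() { LeaveCriticalSection(&m_cs); }
    CRITICAL_SECTION& m_cs;
};
// usage inside a member function: VMLockGuard guard(m_CriticalSection);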
void VMManager::MapMemoryRegion(VAddr base, size_t size, PAddr target)
{
assert((base & PAGE_MASK) == 0); // unaligned address
@ -654,7 +759,6 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma)
break;
case VMAType::Fragmented:
case VMAType::Xbe:
{
MapSpecialRegion(vma.base, vma.size, vma.backing_block);
}

View File

@ -51,8 +51,6 @@ enum class VMAType : u32
Allocated,
// vma represents allocated memory mapped outside the second file view (allocated by VirtualAlloc)
Fragmented,
// vma represents the xbe sections loaded at 0x10000 (it is counted as allocated memory)
Xbe,
// contiguous memory
MemContiguous,
// tiled memory
@ -103,10 +101,11 @@ class VMManager : public PhysicalMemory
{
public:
// constructor
VMManager() {};
VMManager() { InitializeCriticalSectionAndSpinCount(&m_CriticalSection, 0x400); };
// destructor
~VMManager()
{
DeleteCriticalSection(&m_CriticalSection);
UnmapViewOfFile((void*)m_Base);
UnmapViewOfFile((void *)CONTIGUOUS_MEMORY_BASE);
UnmapViewOfFile((void*)TILED_MEMORY_BASE);
@ -120,16 +119,20 @@ class VMManager : public PhysicalMemory
void InitializeChihiro();
// print virtual memory statistics
void VMStatistics() const;
// creates a vma block to be mapped in memory at the specified VAddr, if requested
VAddr MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr = NULL);
// creates a vma representing the memory block to remove
void UnmapRange(VAddr target);
// changes access permissions for a range of vma's, splitting them if necessary
void ReprotectVMARange(VAddr target, size_t size, DWORD new_perms);
// checks if a VAddr is valid; returns false if not
bool IsValidVirtualAddress(const VAddr addr);
// translates a VAddr to its corresponding PAddr; it must be valid
PAddr TranslateVAddrToPAddr(const VAddr addr);
// allocates a block of memory
VAddr Allocate(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr = NULL, ULONG Alignment = PAGE_SIZE, DWORD protect = PAGE_EXECUTE_READWRITE);
// allocate stack memory
VAddr AllocateStack(size_t size);
// deallocate a block of memory
void Deallocate(VAddr addr);
// deallocate stack memory
void DeallocateStack(VAddr addr);
// changes the protections of a memory region
void Protect(VAddr target, size_t size, DWORD new_perms);
// query if a VAddr is valid
bool QueryVAddr(VAddr addr);
// translate a VAddr
PAddr TranslateVAddr(VAddr addr);
private:
@ -141,7 +144,25 @@ class VMManager : public PhysicalMemory
VAddr m_Base = 0;
// handle of the second file view region
HANDLE m_hAliasedView = NULL;
// critical section lock to synchronize accesses
CRITICAL_SECTION m_CriticalSection;
// amount of image virtual memory in use
size_t ImageMemoryInUse = 0;
// amount of non-image virtual memory in use
size_t NonImageMemoryInUse = 0;
// amount of stack virtual memory in use
size_t StackMemoryInUse = 0;
// creates a vma block to be mapped in memory at the specified VAddr, if requested
VAddr MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr = NULL, ULONG Alignment = PAGE_SIZE);
// creates a vma representing the memory block to remove
void UnmapRange(VAddr target, bool StackFlag = false);
// changes access permissions for a range of vma's, splitting them if necessary
void ReprotectVMARange(VAddr target, size_t size, DWORD new_perms);
// checks if a VAddr is valid; returns false if not
bool IsValidVirtualAddress(const VAddr addr);
// translates a VAddr to its corresponding PAddr; it must be valid
PAddr TranslateVAddrToPAddr(const VAddr addr);
// maps a new allocation in the virtual address space
void MapMemoryRegion(VAddr base, size_t size, PAddr target);
// maps a special allocation outside the virtual address space of the second file view
@ -166,6 +187,10 @@ class VMManager : public PhysicalMemory
VMAIter ReprotectVMA(VMAIter vma_handle, DWORD new_perms);
// updates the page table
void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
// acquires the critical section
void Lock();
// releases the critical section
void Unlock();
};
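For orientation, a minimal sketch of how the kernel HLE wrappers changed in this commit drive the new public surface (illustrative call sequence, not code from the diff):
// Illustrative call sequence against the public interface declared above.
VAddr block = g_VMManager.Allocate(0x3000, 0, MAXULONG_PTR);   // MmAllocateSystemMemory path
g_VMManager.Protect(block, 0x3000, PAGE_READONLY);             // MmSetAddressProtect path
PAddr phys  = g_VMManager.TranslateVAddr(block);               // MmGetPhysicalAddress path
bool  valid = g_VMManager.QueryVAddr(block);                   // MmIsAddressValid path
g_VMManager.Deallocate(block);                                 // MmFreeSystemMemory path
VAddr stack = g_VMManager.AllocateStack(4 * PAGE_SIZE);        // MmCreateKernelStack path
g_VMManager.DeallocateStack(stack);                            // MmDeleteKernelStack path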