Overlapped allocations + Chihiro/Debug stuff

parent 1360ebbc7a
commit 4c393e8ae1
@@ -71,8 +71,8 @@ XBSYSAPI EXPORTNUM(169) PVOID NTAPI MmCreateKernelStack
 // ******************************************************************
 XBSYSAPI EXPORTNUM(170) VOID NTAPI MmDeleteKernelStack
 (
-    IN PVOID EndAddress,
-    IN PVOID BaseAddress
+    IN PVOID StackBase,
+    IN PVOID StackLimit
 );

 // ******************************************************************
@@ -119,6 +119,9 @@ extern XbeType g_XbeType;
 /*! indicates emulation of an Chihiro (arcade, instead of Xbox console) executable */
 extern bool g_bIsChihiro;

+/*! indicates emulation of a Debug xbe executable */
+extern bool g_bIsDebug;
+
 /*! maximum number of threads cxbx can handle */
 #define MAXIMUM_XBOX_THREADS 256
@@ -97,6 +97,7 @@ HANDLE CxbxBasePathHandle;
 Xbe* CxbxKrnl_Xbe = NULL;
 XbeType g_XbeType = xtRetail;
 bool g_bIsChihiro = false;
+bool g_bIsDebug = false;
 DWORD_PTR g_CPUXbox = 0;
 DWORD_PTR g_CPUOthers = 0;
@@ -609,13 +610,14 @@ void CxbxKrnlMain(int argc, char* argv[])
     // Detect XBE type :
     g_XbeType = GetXbeType(&CxbxKrnl_Xbe->m_Header);

-    // Register if we're running an Chihiro executable (otherwise it's an Xbox executable)
+    // Register if we're running an Chihiro executable or a debug xbe (otherwise it's an Xbox retail executable)
     g_bIsChihiro = (g_XbeType == xtChihiro);
+    g_bIsDebug = (g_XbeType == xtDebug);

-    if (g_bIsChihiro)
+    if (g_bIsChihiro || g_bIsDebug)
     {
-        // Initialize the Chihiro - specific memory ranges
-        g_VMManager.InitializeChihiro();
+        // Initialize the Chihiro/Debug - specific memory ranges
+        g_VMManager.InitializeChihiroDebug();
     }

     CxbxRestorePersistentMemoryRegions();
@@ -70,7 +70,8 @@ extern "C" {
 #define MM_XBOX_HIGHEST_PHYSICAL_PAGE 0x03FFF
 #define MM_CHIHIRO_HIGHEST_PHYSICAL_PAGE 0x07FFF
 #define MM_64M_PHYSICAL_PAGE 0x04000
-#define MM_INSTANCE_PHYSICAL_PAGE 0x03FE0 // Chihiro arcade should use 0x07FF0
+#define MM_XBOX_INSTANCE_PHYSICAL_PAGE 0x03FE0
+#define MM_CHIHIRO_INSTANCE_PHYSICAL_PAGE 0x07FF0
 #define MM_INSTANCE_PAGE_COUNT 16
 #define CONTIGUOUS_MEMORY_BASE MM_SYSTEM_PHYSICAL_MAP // = 0x80000000
 #define CONTIGUOUS_MEMORY_XBOX_SIZE (64 * ONE_MB)
@@ -99,7 +100,7 @@ extern "C" {
 #define BIOS_CHIHIRO_SIZE 0x1000000
 #define MCPX_BASE 0xFFFFFE00
 #define MCPX_SIZE 0x200
-#define MAX_VIRTUAL_ADDRESS 0x100000000
+#define MAX_VIRTUAL_ADDRESS 0xFFFFFFFF

 /*! memory size per system */
 #define XBOX_MEMORY_SIZE (64 * ONE_MB)
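Illustration (not part of the commit): the constant changes meaning from a one-past-the-end count (0x100000000, which needs 33 bits) to the highest addressable byte (0xFFFFFFFF, which fits a 32-bit VAddr); that is why size and base computations later in this commit gain a "+ 1". A minimal standalone check of the arithmetic:

    // Standalone sketch; VAddr assumed to be a 32-bit integer as on the Xbox.
    #include <cstdint>
    #include <cassert>

    int main()
    {
        const uint32_t MAX_VIRTUAL_ADDRESS = 0xFFFFFFFF; // highest addressable byte
        const uint32_t PAGE_SIZE = 0x1000;

        // The last page now starts at MAX_VIRTUAL_ADDRESS - PAGE_SIZE + 1 = 0xFFFFF000,
        // and its extent is expressed without ever forming the 33-bit value 0x100000000.
        uint32_t last_page = MAX_VIRTUAL_ADDRESS - PAGE_SIZE + 1;
        assert(last_page == 0xFFFFF000);
        assert(last_page + (PAGE_SIZE - 1) == MAX_VIRTUAL_ADDRESS); // no 32-bit overflow
        return 0;
    }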
@@ -8526,7 +8526,7 @@ XTL::X_D3DPalette * WINAPI XTL::EMUPATCH(D3DDevice_CreatePalette2)
     X_D3DPalette *pPalette = EmuNewD3DPalette();

     pPalette->Common |= (Size << X_D3DPALETTE_COMMON_PALETTESIZE_SHIFT);
-    pPalette->Data = (DWORD)g_VMManager.Allocate(XboxD3DPaletteSizeToBytes(Size));
+    pPalette->Data = (DWORD)g_VMManager.Allocate(XboxD3DPaletteSizeToBytes(Size), 0, (~((::ULONG_PTR)0)), 0, PAGE_SIZE, PAGE_EXECUTE_READWRITE, false);
     pPalette->Lock = X_D3DRESOURCE_LOCK_PALETTE; // emulated reference count for palettes

     // TODO: Should't we register the palette with a call to
@@ -72,7 +72,7 @@ XBSYSAPI EXPORTNUM(102) xboxkrnl::PVOID xboxkrnl::MmGlobalData[8] = { NULL, NULL
 // the xbox kernel. Kernel code accessses this as a normal variable.
 // XAPI code however, reference to the address of this kernel variable,
 // thus use indirection (*LaunchDataPage) to get to the same contents.
-XBSYSAPI EXPORTNUM(164) xboxkrnl::PLAUNCH_DATA_PAGE xboxkrnl::LaunchDataPage = NULL;
+XBSYSAPI EXPORTNUM(164) xboxkrnl::PLAUNCH_DATA_PAGE xboxkrnl::LaunchDataPage = xbnull;

 // ******************************************************************
 // * 0x00A5 - MmAllocateContiguousMemory()
@@ -112,34 +112,34 @@ XBSYSAPI EXPORTNUM(166) xboxkrnl::PVOID NTAPI xboxkrnl::MmAllocateContiguousMemo
         LOG_FUNC_ARG(HighestAcceptableAddress)
         LOG_FUNC_ARG(Alignment)
         LOG_FUNC_ARG_TYPE(PROTECTION_TYPE, ProtectionType)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     PVOID pRet = (PVOID)1; // Marker, never returned, overwritten with NULL on input error

     // size must be > 0
     if (NumberOfBytes == 0)
-        pRet = NULL;
+        pRet = xbnull;

     if (Alignment < PAGE_SIZE)
         Alignment = PAGE_SIZE; // page boundary at least

     // Only known flags are allowed
     if ((ProtectionType & ~PAGE_KNOWN_FLAGS) != 0)
-        pRet = NULL;
+        pRet = xbnull;

     // Either PAGE_READONLY or PAGE_READWRITE must be set (not both, nor none)
     if (((ProtectionType & PAGE_READONLY) > 0) == ((ProtectionType & PAGE_READWRITE) > 0))
-        pRet = NULL;
+        pRet = xbnull;

     // Combining PAGE_NOCACHE and PAGE_WRITECOMBINE isn't allowed
     if ((ProtectionType & (PAGE_NOCACHE | PAGE_WRITECOMBINE)) == (PAGE_NOCACHE | PAGE_WRITECOMBINE))
-        pRet = NULL;
+        pRet = xbnull;

     // Allocate when input arguments are valid
-    if (pRet != NULL)
+    if (pRet != xbnull)
     {
         // TODO : Allocate differently if(ProtectionType & PAGE_WRITECOMBINE)
-        pRet = (PVOID)g_VMManager.Allocate(NumberOfBytes, LowestAcceptableAddress, HighestAcceptableAddress, NULL, Alignment, ProtectionType);
+        pRet = (PVOID)g_VMManager.Allocate(NumberOfBytes, LowestAcceptableAddress, HighestAcceptableAddress, NULL, Alignment, ProtectionType, false);
     }

     RETURN(pRet);
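The four checks above can be read as a single predicate over the protection mask. A condensed sketch follows; the PAGE_* values mirror the usual Win32 constants, and the PAGE_KNOWN_FLAGS mask shown here is an assumption (the kernel's real mask may admit more bits, such as the execute variants):

    #include <cstdint>
    #include <cassert>

    const uint32_t PAGE_READONLY     = 0x02;
    const uint32_t PAGE_READWRITE    = 0x04;
    const uint32_t PAGE_NOCACHE      = 0x200;
    const uint32_t PAGE_WRITECOMBINE = 0x400;
    // Assumption: only these flags are accepted; the real mask may be wider.
    const uint32_t PAGE_KNOWN_FLAGS  = PAGE_READONLY | PAGE_READWRITE | PAGE_NOCACHE | PAGE_WRITECOMBINE;

    bool IsValidContiguousProtection(uint32_t type)
    {
        if (type & ~PAGE_KNOWN_FLAGS) { return false; }           // unknown bits present
        bool ro = (type & PAGE_READONLY) != 0;
        bool rw = (type & PAGE_READWRITE) != 0;
        if (ro == rw) { return false; }                           // exactly one of the two
        if ((type & (PAGE_NOCACHE | PAGE_WRITECOMBINE)) ==
            (PAGE_NOCACHE | PAGE_WRITECOMBINE)) { return false; } // mutually exclusive
        return true;
    }

    int main()
    {
        assert(IsValidContiguousProtection(PAGE_READWRITE));
        assert(!IsValidContiguousProtection(PAGE_READONLY | PAGE_READWRITE));
        assert(!IsValidContiguousProtection(PAGE_READWRITE | PAGE_NOCACHE | PAGE_WRITECOMBINE));
        return 0;
    }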
@@ -157,7 +157,7 @@ XBSYSAPI EXPORTNUM(167) xboxkrnl::PVOID NTAPI xboxkrnl::MmAllocateSystemMemory
     LOG_FUNC_BEGIN
         LOG_FUNC_ARG(NumberOfBytes)
         LOG_FUNC_ARG(Protect)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     // TODO: this should probably allocate the memory at a specific system virtual address region...
     PVOID pRet = (PVOID)g_VMManager.Allocate(NumberOfBytes, 0, MAXULONG_PTR, NULL, PAGE_SIZE, Protect);
@@ -177,17 +177,18 @@ XBSYSAPI EXPORTNUM(168) xboxkrnl::PVOID NTAPI xboxkrnl::MmClaimGpuInstanceMemory
     LOG_FUNC_BEGIN
         LOG_FUNC_ARG(NumberOfBytes)
         LOG_FUNC_ARG_OUT(NumberOfPaddingBytes)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     unsigned int highest_physical_page = MM_XBOX_HIGHEST_PHYSICAL_PAGE;
-    if (g_bIsChihiro)
+    unsigned int instance_physical_page = MM_XBOX_INSTANCE_PHYSICAL_PAGE;
+    if (g_bIsChihiro || g_bIsDebug)
     {
         *NumberOfPaddingBytes = 0;
         highest_physical_page = MM_CHIHIRO_HIGHEST_PHYSICAL_PAGE;
     }
     else
         *NumberOfPaddingBytes = MI_CONVERT_PFN_TO_PHYSICAL(MM_64M_PHYSICAL_PAGE) -
-            MI_CONVERT_PFN_TO_PHYSICAL(MM_INSTANCE_PHYSICAL_PAGE + MM_INSTANCE_PAGE_COUNT);
+            MI_CONVERT_PFN_TO_PHYSICAL(instance_physical_page + MM_INSTANCE_PAGE_COUNT);

     DbgPrintf("KNRL: MmClaimGpuInstanceMemory : *NumberOfPaddingBytes = 0x%.8X\n", *NumberOfPaddingBytes);
@@ -219,9 +220,9 @@ XBSYSAPI EXPORTNUM(169) xboxkrnl::PVOID NTAPI xboxkrnl::MmCreateKernelStack
     LOG_FUNC_BEGIN
         LOG_FUNC_ARG(NumberOfBytes)
         LOG_FUNC_ARG(DebuggerThread)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

-    VAddr addr = NULL;
+    VAddr addr = xbnull;

     /**
     * Function at present does not:
@@ -241,16 +242,20 @@ XBSYSAPI EXPORTNUM(169) xboxkrnl::PVOID NTAPI xboxkrnl::MmCreateKernelStack
 // ******************************************************************
 XBSYSAPI EXPORTNUM(170) xboxkrnl::VOID NTAPI xboxkrnl::MmDeleteKernelStack
 (
-    PVOID EndAddress,
-    PVOID BaseAddress
+    PVOID StackBase,
+    PVOID StackLimit
 )
 {
     LOG_FUNC_BEGIN
-        LOG_FUNC_ARG(EndAddress)
-        LOG_FUNC_ARG(BaseAddress)
-    LOG_FUNC_END;
+        LOG_FUNC_ARG(StackBase)
+        LOG_FUNC_ARG(StackLimit)
+    LOG_FUNC_END;

-    g_VMManager.DeallocateStack((VAddr)BaseAddress);
+    size_t ActualSize = ((VAddr)StackBase - (VAddr)StackLimit) + PAGE_SIZE;
+
+    VAddr StackBottom = (VAddr)StackBase - ActualSize;
+
+    g_VMManager.DeallocateStack(StackBottom);
 }

 // ******************************************************************
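In the new signature StackBase is the highest stack address (what MmCreateKernelStack returned) and StackLimit the lowest usable byte; the extra PAGE_SIZE accounts for the no-access guard page below the limit, so StackBottom lands back on the base of the original mapping. A worked example with hypothetical addresses:

    #include <cstdint>
    #include <cassert>

    int main()
    {
        const uint32_t PAGE_SIZE = 0x1000;
        uint32_t v_addr = 0x00100000;  // hypothetical base returned by MapMemoryBlock
        uint32_t size   = 0x5000;      // 0x4000 of stack + 0x1000 guard page

        uint32_t StackBase  = v_addr + size;       // 0x00105000, what AllocateStack returns
        uint32_t StackLimit = v_addr + PAGE_SIZE;  // 0x00101000, first usable byte above the guard

        uint32_t ActualSize  = (StackBase - StackLimit) + PAGE_SIZE; // 0x5000
        uint32_t StackBottom = StackBase - ActualSize;               // back to 0x00100000
        assert(StackBottom == v_addr); // DeallocateStack unmaps from the true mapping base
        return 0;
    }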
@@ -286,7 +291,7 @@ XBSYSAPI EXPORTNUM(172) xboxkrnl::NTSTATUS NTAPI xboxkrnl::MmFreeSystemMemory
     LOG_FUNC_BEGIN
         LOG_FUNC_ARG(BaseAddress)
         LOG_FUNC_ARG(NumberOfBytes)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     g_VMManager.Deallocate((VAddr)BaseAddress);
@@ -310,9 +315,7 @@ XBSYSAPI EXPORTNUM(173) xboxkrnl::PHYSICAL_ADDRESS NTAPI xboxkrnl::MmGetPhysical
     // MmLockUnlockBufferPages, emulate this???
     LOG_INCOMPLETE();

-    PHYSICAL_ADDRESS addr = g_VMManager.TranslateVAddr((VAddr)BaseAddress);
-
-    return addr;
+    return g_VMManager.TranslateVAddr((VAddr)BaseAddress);
 }

 // ******************************************************************
@@ -349,7 +352,7 @@ XBSYSAPI EXPORTNUM(175) xboxkrnl::VOID NTAPI xboxkrnl::MmLockUnlockBufferPages
         LOG_FUNC_ARG(BaseAddress)
         LOG_FUNC_ARG(NumberOfBytes)
         LOG_FUNC_ARG(Protect)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     // REMARK: all the pages inside the main memory pool are non-relocatable so, for the moment, this function is pointless
     LOG_IGNORED();
@@ -367,7 +370,7 @@ XBSYSAPI EXPORTNUM(176) xboxkrnl::VOID NTAPI xboxkrnl::MmLockUnlockPhysicalPage
     LOG_FUNC_BEGIN
         LOG_FUNC_ARG(PhysicalAddress)
         LOG_FUNC_ARG(UnlockPage)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     // REMARK: all the pages inside the main memory pool are non-relocatable so, for the moment, this function is pointless
     LOG_IGNORED();
@@ -395,7 +398,7 @@ XBSYSAPI EXPORTNUM(177) xboxkrnl::PVOID NTAPI xboxkrnl::MmMapIoSpace
         LOG_FUNC_ARG(PhysicalAddress)
         LOG_FUNC_ARG(NumberOfBytes)
         LOG_FUNC_ARG(ProtectionType)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     PVOID pRet;

@@ -426,7 +429,7 @@ XBSYSAPI EXPORTNUM(178) xboxkrnl::VOID NTAPI xboxkrnl::MmPersistContiguousMemory
         LOG_FUNC_ARG(BaseAddress)
         LOG_FUNC_ARG(NumberOfBytes)
         LOG_FUNC_ARG(Persist)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     if (BaseAddress == LaunchDataPage)
     {
@@ -543,7 +546,7 @@ XBSYSAPI EXPORTNUM(182) xboxkrnl::VOID NTAPI xboxkrnl::MmSetAddressProtect
         LOG_FUNC_ARG(BaseAddress)
         LOG_FUNC_ARG(NumberOfBytes)
         LOG_FUNC_ARG(NewProtect)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     g_VMManager.Protect((VAddr)BaseAddress, NumberOfBytes, NewProtect);
 }
@@ -563,7 +566,7 @@ XBSYSAPI EXPORTNUM(183) xboxkrnl::NTSTATUS NTAPI xboxkrnl::MmUnmapIoSpace
     LOG_FUNC_BEGIN
         LOG_FUNC_ARG(BaseAddress)
         LOG_FUNC_ARG(NumberOfBytes)
-    LOG_FUNC_END;
+    LOG_FUNC_END;

     if ((xbaddr)BaseAddress >= XBOX_WRITE_COMBINED_BASE) { // 0xF0000000
         // Don't free hardware devices (flash, NV2A, etc)
@@ -93,33 +93,16 @@ XBSYSAPI EXPORTNUM(327) xboxkrnl::NTSTATUS NTAPI xboxkrnl::XeLoadSection

     // ergo720: I can't just +/- PAGE_SIZE the VirtualAddress and the VirtualSize of a section because some titles have
     // sections less than PAGE_SIZE, which will cause again an overlap with the next section since both will have the
-    // same aligned starting address. A possible solution to this is to use std::multimap, which allows duplicate keys
-    // but at this point I'm not sure if that will cause issues with the rest of the virtual memory manager code.
+    // same aligned starting address.

     // Test case: Dead or Alive 3, section XGRPH has a size of 764 bytes
     // XGRPH DSOUND
     // 1F18A0 + 2FC -> aligned_start = 1F1000 1F1BA0 -> aligned_start = 1F1000 <- collision

-    VAddr BaseAddress = (VAddr)Section->VirtualAddress;
-    VAddr EndingAddress = (VAddr)Section->VirtualAddress + Section->VirtualSize;
-
-    if ((*Section->TailReferenceCount) != 0)
+    // Make this loading consume physical memory as well
+    if (!g_VMManager.Allocate(Section->VirtualSize, 0, MAXULONG_PTR, (VAddr)Section->VirtualAddress))
     {
-        EndingAddress &= ~PAGE_MASK;
-    }
-
-    if ((*Section->HeadReferenceCount) != 0)
-    {
-        BaseAddress = (BaseAddress + PAGE_SIZE) & ~PAGE_MASK;
-    }
-
-    if (EndingAddress > BaseAddress)
-    {
-        // Make this loading consume physical memory as well
-        if (!g_VMManager.Allocate(EndingAddress - BaseAddress, 0, MAXULONG_PTR, BaseAddress))
-        {
-            ret = STATUS_NO_MEMORY;
-        }
+        ret = STATUS_NO_MEMORY;
     }
     // Increment the head/tail page reference counters
     (*Section->HeadReferenceCount)++;
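Illustration of the collision quoted in the comment (standalone check; values are from the test case, and DSOUND starts at 1F1BA0, a few bytes past XGRPH's last byte):

    #include <cstdint>
    #include <cassert>

    int main()
    {
        const uint32_t PAGE_MASK = 0xFFF;
        uint32_t xgrph_start  = 0x1F18A0;  // XGRPH, only 0x2FC (764) bytes long
        uint32_t dsound_start = 0x1F1BA0;  // next section

        assert((xgrph_start & ~PAGE_MASK) == 0x1F1000);   // aligned start of XGRPH
        assert((dsound_start & ~PAGE_MASK) == 0x1F1000);  // same page -> collision
        return 0;
    }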
@@ -160,6 +143,8 @@ XBSYSAPI EXPORTNUM(328) xboxkrnl::NTSTATUS NTAPI xboxkrnl::XeUnloadSection
     if (Section->SectionReferenceCount == 0) {
         memset(Section->VirtualAddress, 0, Section->VirtualSize);

+        // REMARK: the following can be tested with Broken Sword - The Sleeping Dragon, RalliSport Challenge, ...
+
         VAddr BaseAddress = (VAddr)Section->VirtualAddress;
         VAddr EndingAddress = (VAddr)Section->VirtualAddress + Section->VirtualSize;
@@ -179,11 +164,7 @@ XBSYSAPI EXPORTNUM(328) xboxkrnl::NTSTATUS NTAPI xboxkrnl::XeUnloadSection

         if (EndingAddress > BaseAddress)
         {
-            g_VMManager.Deallocate(BaseAddress);
-        }
-        else
-        {
-            g_VMManager.DeallocateOverlapped((VAddr)Section->VirtualAddress);
+            g_VMManager.Deallocate(BaseAddress, EndingAddress - BaseAddress);
         }
     }

@@ -2766,7 +2766,7 @@ DEVICE_READ32(PFB)
         break;
     case NV_PFB_CSTATUS:
     {
-        if (g_bIsChihiro) { result = CONTIGUOUS_MEMORY_CHIHIRO_SIZE; break; }
+        if (g_bIsChihiro || g_bIsDebug) { result = CONTIGUOUS_MEMORY_CHIHIRO_SIZE; break; }
         result = CONTIGUOUS_MEMORY_XBOX_SIZE;
     }
         break;
@@ -1042,7 +1042,7 @@ DWORD WINAPI XTL::EMUPATCH(XLaunchNewImageA)

     // Update the kernel's LaunchDataPage :
     {
-        if (xboxkrnl::LaunchDataPage == NULL)
+        if (xboxkrnl::LaunchDataPage == xbnull)
         {
             PVOID LaunchDataVAddr = xboxkrnl::MmAllocateContiguousMemory(sizeof(xboxkrnl::LAUNCH_DATA_PAGE));
             if (!LaunchDataVAddr)
@@ -1058,11 +1058,11 @@ DWORD WINAPI XTL::EMUPATCH(XLaunchNewImageA)

     xboxkrnl::MmPersistContiguousMemory((PVOID)xboxkrnl::LaunchDataPage, PAGE_SIZE, TRUE);

-    if (pLaunchData != NULL)
+    if (pLaunchData != xbnull)
         // Save the launch data
         memcpy(&(xboxkrnl::LaunchDataPage->LaunchData[0]), pLaunchData, sizeof(LAUNCH_DATA));

-    if (lpTitlePath == NULL)
+    if (lpTitlePath == xbnull)
     {
         // If no path is specified, then the xbe is rebooting to dashboard
         char szDashboardPath[MAX_PATH] = { 0 };
@@ -86,6 +86,10 @@ PAddr PhysicalMemory::AllocatePhysicalMemory(size_t size)
         }
     }

+    // Reinstate this if the nv2a instance memory allocation is found to be ever deallocated after being
+    // mapped during initialization. The only one that could do it is MmClaimGpuInstanceMemory, however it doesn't seem
+    // to deallocate the region, just to repurpose it...
+
     //u32 offset = std::next(rit)->first + std::next(rit)->second;
     /*if (rit == max_contiguous_it && m_MaxContiguousAddress - offset >= size)
     {
@@ -274,6 +278,42 @@ VAddr PhysicalMemory::AllocateFragmented(size_t size)
     return aligned_start;
 }

+void PhysicalMemory::ShrinkPhysicalAllocation(PAddr addr, size_t offset, bool bFragmentedMap, bool bStart)
+{
+    if (!offset) { return; } // nothing to do
+
+    if (bFragmentedMap)
+    {
+        auto it = std::prev(m_Fragmented_mem_map.upper_bound(addr));
+        PAddr old_base = it->first;
+        size_t old_size = it->second;
+        m_Fragmented_mem_map.erase(old_base);
+
+        if (old_size - offset)
+        {
+            if (bStart) { m_Fragmented_mem_map.emplace(old_base + offset, old_size - offset); }
+            else { m_Fragmented_mem_map.emplace(old_base, old_size - offset); }
+        }
+
+        m_PhysicalMemoryInUse -= offset;
+    }
+    else
+    {
+        auto it = m_Mem_map.lower_bound(addr);
+        PAddr old_base = it->first;
+        size_t old_size = it->second;
+        m_Mem_map.erase(old_base);
+
+        if (old_size - offset)
+        {
+            if (bStart) { m_Mem_map.emplace(old_base + offset, old_size - offset); }
+            else { m_Mem_map.emplace(old_base, old_size - offset); }
+        }
+
+        m_PhysicalMemoryInUse -= offset;
+    }
+}
+
 void PhysicalMemory::DeAllocatePhysicalMemory(PAddr addr)
 {
     auto it = m_Mem_map.lower_bound(addr);
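ShrinkPhysicalAllocation treats each map entry as a base -> size interval and trims offset bytes off either end. A self-contained sketch of just that map manipulation (member maps replaced by a local one, names illustrative):

    #include <cstdint>
    #include <map>
    #include <cassert>

    typedef uint32_t PAddr;

    void Shrink(std::map<PAddr, size_t>& mem_map, PAddr addr, size_t offset, bool bStart)
    {
        auto it = mem_map.lower_bound(addr); // assumes addr is an exact block base
        PAddr old_base = it->first;
        size_t old_size = it->second;
        mem_map.erase(old_base);

        if (old_size - offset) // re-insert only if something is left
        {
            if (bStart) { mem_map.emplace(old_base + offset, old_size - offset); }
            else        { mem_map.emplace(old_base, old_size - offset); }
        }
    }

    int main()
    {
        std::map<PAddr, size_t> m = { { 0x10000, 0x4000 } };
        Shrink(m, 0x10000, 0x1000, true); // cut one page off the front
        assert(m.begin()->first == 0x11000 && m.begin()->second == 0x3000);
        return 0;
    }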
@@ -104,6 +104,8 @@ class PhysicalMemory
     PAddr AllocatePhysicalMemoryRange(size_t size, PAddr low_addr, PAddr high_addr);
     // allocates a block of memory with VirtualAlloc when the main memory is fragmented and sets an error code
     VAddr AllocateFragmented(size_t size);
+    // shrinks the size of an allocation
+    void ShrinkPhysicalAllocation(PAddr addr, size_t offset, bool bFragmentedMap, bool bStart);
     // deallocates a block of the mapped file
     void DeAllocatePhysicalMemory(PAddr addr);
     // deallocates a block allocated with VirtualAlloc
@@ -36,7 +36,7 @@

 // Acknowledgment:
 // The core logic of the VMManager class is based on the virtual management code of the citra emulator (GPLv2 license),
-// with some modest changes and adaptions to suit Cxbx-Reloaded and Xbox emulation.
+// with some changes and adaptions to suit Cxbx-Reloaded and Xbox emulation.
 // Citra website: https://citra-emu.org/

@@ -52,7 +52,6 @@ VMManager g_VMManager;
 /* PTEflags */

 #define Present 1 << 0
-#define OutsideMapped 1 << 11


 /* Global page table */
@@ -70,7 +69,8 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const
 {
     assert(base + size == next.base);

-    if (permissions != next.permissions || type != next.type) { return false; }
+    if (permissions != next.permissions || type != next.type ||
+        type == VMAType::Lock || next.type == VMAType::Lock) { return false; }
     if (type == VMAType::Allocated && backing_block != next.backing_block) { return false; }

     return true;
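A toy illustration of the new merge rule (fields stripped down to what matters): equal neighbours still coalesce, but a Lock vma never merges in either direction, which is what lets UnmapRange pin sentinel entries at the ends of the range it is tearing down:

    #include <cassert>

    enum class VMAType { Free, Allocated, Lock };

    struct Vma { unsigned base, size; VMAType type; };

    // Simplified predicate: the real one also compares permissions and backing blocks.
    bool CanMerge(const Vma& a, const Vma& b)
    {
        assert(a.base + a.size == b.base); // must be adjacent
        return a.type == b.type && a.type != VMAType::Lock && b.type != VMAType::Lock;
    }

    int main()
    {
        Vma f1{ 0x1000, 0x1000, VMAType::Free }, f2{ 0x2000, 0x1000, VMAType::Free };
        Vma lk{ 0x3000, 0x1000, VMAType::Lock };
        assert(CanMerge(f1, f2));  // identical free neighbours merge
        assert(!CanMerge(f2, lk)); // a Lock sentinel stays a separate map entry
        return 0;
    }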
@@ -78,6 +78,29 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const

 void VMManager::Initialize(HANDLE file_view)
 {
+    // This reserves a large enough memory region to map the second physical memory file view, and aligns the start address.
+    // It must be page aligned otherwise the mapping/unmapping functions will produce incorrect results and on
+    // debug builds they will assert.
+    UINT_PTR addr = (UINT_PTR)VirtualAlloc(NULL, CHIHIRO_MEMORY_SIZE + PAGE_SIZE, MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+    if (!addr)
+    {
+        CxbxKrnlCleanup("VMManager: VirtualAlloc could not find a suitable region to allocate the second physical memory view!");
+        return;
+    }
+    VAddr aligned_start = addr & ~(UINT_PTR)PAGE_MASK;
+    VirtualFree((void*)addr, 0, MEM_RELEASE);
+    m_Base = (VAddr)MapViewOfFileEx(
+        file_view,
+        FILE_MAP_READ | FILE_MAP_WRITE | FILE_MAP_EXECUTE,
+        0,
+        0,
+        CHIHIRO_MEMORY_SIZE,
+        (void*)aligned_start);
+    if (m_Base != aligned_start)
+    {
+        CxbxKrnlCleanup("VMManager: MapViewOfFileEx could not map the second physical memory view!");
+        return;
+    }
+    m_hAliasedView = file_view;
+
     m_Vma_map.clear();
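The reserve-then-map idiom used above is worth a note: VirtualAlloc is only used to discover a free region large enough for the view, the region is released, and MapViewOfFileEx is immediately asked to map the file view at that exact address. A minimal sketch (the Win32 calls are real; the helper and its parameters are illustrative), including the caveat that another thread could in principle grab the region between the free and the map:

    #include <windows.h>

    void* MapViewAt(HANDLE file_view, SIZE_T view_size)
    {
        // Probe for a free region; VirtualAlloc only reserves, it commits nothing.
        void* probe = VirtualAlloc(NULL, view_size + 0x1000, MEM_RESERVE, PAGE_NOACCESS);
        if (!probe) { return NULL; }
        UINT_PTR aligned = ((UINT_PTR)probe + 0xFFF) & ~(UINT_PTR)0xFFF; // page-align
        VirtualFree(probe, 0, MEM_RELEASE); // give the region back...
        // ...and immediately claim it as the file view's fixed base address.
        // NOTE: a tiny race window exists here between the free and the map.
        return MapViewOfFileEx(file_view, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0,
                               view_size, (void*)aligned);
    }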
@@ -87,7 +110,7 @@ void VMManager::Initialize(HANDLE file_view)
     // Initialize the map with a single free region covering the entire virtual address space, less the first page
     VirtualMemoryArea initial_vma;
     initial_vma.base = FIRST_PAGE_ADDR;
-    initial_vma.size = MAX_VIRTUAL_ADDRESS - PAGE_SIZE;
+    initial_vma.size = MAX_VIRTUAL_ADDRESS - PAGE_SIZE + 1;
     m_Vma_map.emplace(initial_vma.base, initial_vma);
     UpdatePageTableForVMA(initial_vma);

@@ -100,7 +123,7 @@ void VMManager::Initialize(HANDLE file_view)
     UpdatePageTableForVMA(first_page_vma);

     // D3D uses the first physical page to initialize the push buffer. At the moment, this doesn't seem to be emulated though
-    Allocate(PAGE_SIZE, 0, PAGE_SIZE - 1);
+    Allocate(PAGE_SIZE, 0, PAGE_SIZE - 1, 0, PAGE_SIZE, PAGE_EXECUTE_READWRITE, false);

     // Allocate the nv2a instance memory and the memory holding the PFN database (the latter is not emulated)
     // REMARK: I Can't simply call Allocate here since MapMemoryBlock checks if the high addr is higher than m_MaxContiguousAddress,
@@ -115,7 +138,7 @@ void VMManager::Initialize(HANDLE file_view)

     // Allocate memory for the dummy kernel
     // NOTE: change PAGE_SIZE if the size of the dummy kernel increases!
-    Allocate(sizeof(DUMMY_KERNEL), XBE_IMAGE_BASE, XBE_IMAGE_BASE + PAGE_SIZE - 1);
+    Allocate(sizeof(DUMMY_KERNEL), XBE_IMAGE_BASE, XBE_IMAGE_BASE + PAGE_SIZE - 1, 0, PAGE_SIZE, PAGE_EXECUTE_READWRITE, false);

     // Map the tiled memory
     VMAIter tiled_memory_vma_handle = CarveVMA(TILED_MEMORY_BASE, TILED_MEMORY_XBOX_SIZE);
@@ -190,17 +213,17 @@ void VMManager::Initialize(HANDLE file_view)

     // Map the mcpx
     // NOTE: Again, I can't use MCPX_BASE and MCPX_SIZE because those are not aligned
-    VMAIter mcpx_vma_handle = CarveVMA(MAX_VIRTUAL_ADDRESS - PAGE_SIZE, PAGE_SIZE);
+    VMAIter mcpx_vma_handle = CarveVMA(MAX_VIRTUAL_ADDRESS - PAGE_SIZE + 1, PAGE_SIZE);
     VirtualMemoryArea& mcpx_vma = mcpx_vma_handle->second;
     mcpx_vma.type = VMAType::DeviceMCPX;
     MergeAdjacentVMA(mcpx_vma_handle);
     UpdatePageTableForVMA(mcpx_vma);
-    m_NonImageMemoryInUse += PAGE_SIZE;
+    m_NonImageMemoryInUse += (PAGE_SIZE);

     printf("Page table initialized!\n");
 }

-void VMManager::InitializeChihiro()
+void VMManager::InitializeChihiroDebug()
 {
     UnmapRange(CONTIGUOUS_MEMORY_BASE + m_MaxContiguousAddress);
     m_MaxContiguousAddress = CHIHIRO_CONTIGUOUS_MEMORY_LIMIT;
@@ -223,16 +246,23 @@ void VMManager::InitializeChihiro()
     UpdatePageTableForVMA(tiled_memory_vma);
     m_NonImageMemoryInUse += TILED_MEMORY_CHIHIRO_SIZE;

+    // NOTE: we cannot just call Unmap on the mcpx region because its base + size will overflow to 0x100000000
+    // which will trigger an assert in CarveVMARange
+    m_Vma_map.lower_bound(MAX_VIRTUAL_ADDRESS - PAGE_SIZE + 1)->second.type = VMAType::Free;
+    m_NonImageMemoryInUse -= PAGE_SIZE;
+
     // Map the bios
-    UnmapRange(MAX_VIRTUAL_ADDRESS - PAGE_SIZE); // unmap the mcpx
     UnmapRange(BIOS_BASE);
     VMAIter bios_vma_handle = CarveVMA(BIOS_BASE, BIOS_CHIHIRO_SIZE);
     VirtualMemoryArea& bios_vma = bios_vma_handle->second;
     bios_vma.type = VMAType::DeviceBIOS;
     UpdatePageTableForVMA(bios_vma);
-    m_NonImageMemoryInUse += BIOS_CHIHIRO_SIZE;
+    m_NonImageMemoryInUse += (BIOS_CHIHIRO_SIZE);

-    printf("Page table for Chihiro initialized!\n");
+    if (g_bIsChihiro) {
+        printf("Page table for Chihiro initialized!\n");
+    }
+    else { printf("Page table for Debug console initialized!\n"); }
 }

 void VMManager::MemoryStatistics(xboxkrnl::PMM_STATISTICS memory_statistics)
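The overflow the NOTE warns about is plain 32-bit wraparound: the mcpx page's base plus its size is exactly 0x100000000, which truncates to 0 and would break any "end >= base" assertion. A standalone check:

    #include <cstdint>
    #include <cassert>

    int main()
    {
        uint32_t base = 0xFFFFF000, size = 0x1000; // the mcpx page at the top of the space
        uint32_t end  = base + size;               // 0x100000000 truncates to 0
        assert(end == 0);                          // so an "end >= base" sanity check would fire
        return 0;
    }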
@@ -240,14 +270,14 @@ void VMManager::MemoryStatistics(xboxkrnl::PMM_STATISTICS memory_statistics)
     memory_statistics->TotalPhysicalPages = m_MaxPhysicalMemory / PAGE_SIZE;
     memory_statistics->AvailablePages = (m_MaxPhysicalMemory - m_PhysicalMemoryInUse) / PAGE_SIZE;
     memory_statistics->VirtualMemoryBytesCommitted = m_ImageMemoryInUse + m_NonImageMemoryInUse;
-    memory_statistics->VirtualMemoryBytesReserved = (ULONG)(MAX_VIRTUAL_ADDRESS - (m_ImageMemoryInUse + m_NonImageMemoryInUse));
+    memory_statistics->VirtualMemoryBytesReserved = 0; // this is the num of bytes reserved with MEM_RESERVE by NtAllocateVirtualMemory
     memory_statistics->CachePagesCommitted = 0; // not implemented
     memory_statistics->PoolPagesCommitted = 0; // not implemented
     memory_statistics->StackPagesCommitted = m_StackMemoryInUse;
     memory_statistics->ImagePagesCommitted = m_ImageMemoryInUse;
 }

-VAddr VMManager::Allocate(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr, ULONG Alignment, DWORD protect)
+VAddr VMManager::Allocate(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr, ULONG Alignment, DWORD protect, bool bNonContiguous)
 {
     LOG_FUNC_BEGIN
         LOG_FUNC_ARG(size);
@@ -256,19 +286,21 @@ VAddr VMManager::Allocate(size_t size, PAddr low_addr, PAddr high_addr, VAddr ad
         LOG_FUNC_ARG(addr);
         LOG_FUNC_ARG(Alignment);
         LOG_FUNC_ARG(protect);
+        LOG_FUNC_ARG(bNonContiguous);
     LOG_FUNC_END;

-    // PAGE_WRITECOMBINE is not allowed for shared memory, unless SEC_WRITECOMBINE flag was specified when calling the
-    // CreateFileMapping function. Considering that Cxbx doesn't emulate the caches, it's probably safe to ignore this flag
+    // PAGE_WRITECOMBINE/PAGE_NOCACHE are not allowed for shared memory, unless SEC_WRITECOMBINE/SEC_NOCACHE flag
+    // was specified when calling the CreateFileMapping function. Considering that Cxbx doesn't emulate the caches,
+    // it's probably safe to ignore these flags

     Lock();
-    size_t aligned_size = (size + PAGE_MASK) & ~PAGE_MASK;
-    VAddr v_addr = MapMemoryBlock(aligned_size, low_addr, high_addr, addr, Alignment);
+    size_t ReturnedSize = size;
+    VAddr v_addr = MapMemoryBlock(&ReturnedSize, low_addr, high_addr, addr, Alignment, bNonContiguous);
     if (v_addr)
     {
-        ReprotectVMARange(v_addr, aligned_size, protect & (~PAGE_WRITECOMBINE));
+        ReprotectVMARange(v_addr, ReturnedSize, protect & ~(PAGE_WRITECOMBINE | PAGE_NOCACHE));
         protect & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) ?
-            m_ImageMemoryInUse += aligned_size : m_NonImageMemoryInUse += aligned_size;
+            m_ImageMemoryInUse += ReturnedSize : m_NonImageMemoryInUse += ReturnedSize;
     }
     Unlock();
@@ -277,16 +309,21 @@ VAddr VMManager::Allocate(size_t size, PAddr low_addr, PAddr high_addr, VAddr ad

 VAddr VMManager::AllocateZeroed(size_t size)
 {
-    LOG_FORWARD(Allocate); // Log AllocateZeroed as the origin of the following RETURN log message
+    LOG_FUNC_ONE_ARG(size);

-    VAddr addr = Allocate(size);
-    if (addr)
+    Lock();
+    size_t ReturnedSize = size;
+    VAddr v_addr = MapMemoryBlock(&ReturnedSize, 0, MAXULONG_PTR);
+    if (v_addr)
     {
-        size_t aligned_size = (size + PAGE_MASK) & ~PAGE_MASK;
-        memset((void*)addr, 0, aligned_size);
-    }
+        ReprotectVMARange(v_addr, ReturnedSize, PAGE_EXECUTE_READWRITE);
+        m_ImageMemoryInUse += ReturnedSize;

-    return addr;
+        memset((void*)v_addr, 0, ReturnedSize);
+    }
+    Unlock();
+
+    RETURN(v_addr);
 }

 VAddr VMManager::AllocateStack(size_t size)
@@ -294,39 +331,26 @@ VAddr VMManager::AllocateStack(size_t size)
     LOG_FUNC_ONE_ARG(size);

     Lock();
-    size_t aligned_size = (size + PAGE_SIZE + PAGE_MASK) & ~PAGE_MASK;
-    VAddr addr = MapMemoryBlock(aligned_size, 0, MAXULONG_PTR);
-    if (addr)
+    size_t ReturnedSize = size + PAGE_SIZE;
+    VAddr v_addr = MapMemoryBlock(&ReturnedSize, 0, MAXULONG_PTR);
+    if (v_addr)
     {
-        ReprotectVMARange(addr, PAGE_SIZE, PAGE_NOACCESS);
-        addr += aligned_size;
-        m_StackMemoryInUse += aligned_size;
+        m_Vma_map.lower_bound(v_addr)->second.type = VMAType::Stack;
+        ReprotectVMARange(v_addr, PAGE_SIZE, PAGE_NOACCESS); // guard page of the stack
+        v_addr += ReturnedSize;
+        m_StackMemoryInUse += ReturnedSize;
     }
     Unlock();

-    RETURN(addr);
+    RETURN(v_addr);
 }

-void VMManager::DeallocateOverlapped(VAddr addr)
+void VMManager::Deallocate(VAddr addr, size_t size)
 {
     LOG_FUNC_ONE_ARG(addr);

     Lock();
-    VAddr aligned_addr = addr & ~(UINT_PTR)PAGE_MASK;
-    auto it = m_Vma_map.lower_bound(aligned_addr);
-    if (it->first == aligned_addr && it->second.type != VMAType::Free)
-    {
-        UnmapRange(aligned_addr);
-    }
-    Unlock();
-}
-
-void VMManager::Deallocate(VAddr addr)
-{
-    LOG_FUNC_ONE_ARG(addr);
-
-    Lock();
-    UnmapRange(addr);
+    UnmapRange(addr, size);
     Unlock();
 }
@@ -348,11 +372,12 @@ void VMManager::Protect(VAddr target, size_t size, DWORD new_perms)
         LOG_FUNC_ARG(new_perms);
     LOG_FUNC_END;

-    // PAGE_WRITECOMBINE is not allowed for shared memory, unless SEC_WRITECOMBINE flag was specified when calling the
-    // CreateFileMapping function. Considering that Cxbx doesn't emulate the caches, it's probably safe to ignore this flag
+    // PAGE_WRITECOMBINE/PAGE_NOCACHE are not allowed for shared memory, unless SEC_WRITECOMBINE/SEC_NOCACHE flag
+    // was specified when calling the CreateFileMapping function. Considering that Cxbx doesn't emulate the caches,
+    // it's probably safe to ignore these flags

     Lock();
-    ReprotectVMARange(target, size, new_perms & (~PAGE_WRITECOMBINE));
+    ReprotectVMARange(target, size, new_perms & ~(PAGE_WRITECOMBINE | PAGE_NOCACHE));
     Unlock();
 }
@@ -422,11 +447,50 @@ size_t VMManager::QuerySize(VAddr addr)
     RETURN(size);
 }

-VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr, ULONG Alignment)
+VAddr VMManager::MapMemoryBlock(size_t* size, PAddr low_addr, PAddr high_addr, VAddr addr, ULONG Alignment, bool bNonContiguous)
 {
     // Find a free memory block for the allocation, if any
     u32 offset;
-    size_t aligned_size = size;
+    size_t aligned_size = (*size + PAGE_MASK) & ~PAGE_MASK;
+
+    if (addr)
+    {
+        // REMARK: the following assumes that there is only one VMAType::Free between VAddr and VAddr + size. This is fine for XeUnloadSection,
+        // not so for NtAllocateVirtualMemory. For that, an approach similar to UnmapRange (in particular VMAType::Lock) should be used.
+        // Also, NtAllocateVirtualMemory should have its own implementation function, so that this one can be split.
+
+        addr &= ~PAGE_MASK;
+        VMAIter vma_handle = GetVMAIterator(addr);
+
+        // base address is outside the range managed by the kernel
+        assert(vma_handle != m_Vma_map.end());
+
+        if (vma_handle->second.type == VMAType::Allocated || vma_handle->second.type == VMAType::Stack)
+        {
+            // region is overlapped (base must lie inside the allocated vma)
+            assert(addr < vma_handle->second.base + vma_handle->second.size);
+
+            size_t overlapped_size = vma_handle->second.base + vma_handle->second.size - addr;
+            if (addr + aligned_size <= vma_handle->second.base + vma_handle->second.size)
+            {
+                // region is totally inside the existing allocation, so there's nothing new to commit. Bail out now
+                *size = overlapped_size;
+                return addr;
+            }
+
+            auto next_vma = std::next(vma_handle);
+            addr = next_vma->first;
+            if (next_vma->second.base + aligned_size - overlapped_size > std::next(next_vma)->first)
+            {
+                aligned_size = std::next(next_vma)->first - next_vma->first;
+            }
+            else
+            {
+                aligned_size -= overlapped_size;
+            }
+        }
+    }

     if (high_addr == MAXULONG_PTR)
     {
         offset = AllocatePhysicalMemory(aligned_size);
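A worked example (hypothetical numbers) of the overlap handling above: when the requested region sticks out past an existing allocation, only the non-overlapped tail still has to be committed, and *size reports what the caller actually ended up with:

    #include <cstdint>
    #include <cassert>

    int main()
    {
        // An existing vma covers [0x10000, 0x13000) and a section asks
        // for 0x5000 bytes starting at 0x12000, inside that vma.
        uint32_t vma_base = 0x10000, vma_size = 0x3000;
        uint32_t addr = 0x12000, aligned_size = 0x5000;

        uint32_t overlapped_size = vma_base + vma_size - addr; // 0x1000 already committed
        assert(addr + aligned_size > vma_base + vma_size);     // request sticks out the back

        // Only the part past the existing allocation still needs to be committed:
        uint32_t remaining = aligned_size - overlapped_size;   // 0x4000, starting at 0x13000
        assert(remaining == 0x4000);
        return 0;
    }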
@@ -434,7 +498,6 @@ VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VA
     else
     {
         PAddr aligned_low = low_addr & ~(UINT_PTR)(Alignment - 1);
-        // Increase high_addr of one page to handle the case when low/high addr are exactly at the boundary of the allocation
         PAddr aligned_high = (high_addr + PAGE_SIZE) & ~(UINT_PTR)(Alignment - 1);
         if (aligned_high > m_MaxContiguousAddress) { aligned_high = m_MaxContiguousAddress; }
         if (aligned_low > aligned_high) { aligned_low = aligned_high - PAGE_SIZE; }
@@ -448,12 +511,14 @@ VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VA
     {
     case PMEMORY_SUCCESS:
     {
-        if (!addr) {
-            addr = CONTIGUOUS_MEMORY_BASE + offset; // VAddr is simply the offset from the base address inside the second file view
-        } else {
-            addr &= ~PAGE_MASK;
+        if (!addr)
+        {
+            if (bNonContiguous) {
+                addr = m_Base + offset;
+            }
+            else { addr = CONTIGUOUS_MEMORY_BASE + offset; } // VAddr is simply the offset from the base of the contiguous memory
         }

         VMAIter vma_handle = CarveVMA(addr, aligned_size);

         VirtualMemoryArea& final_vma = vma_handle->second;
@@ -469,16 +534,14 @@ VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VA

     case PMEMORY_ALLOCATE_FRAGMENTED:
     {
-        if (!addr) {
+        if (!addr)
             addr = offset; // VAddr is the aligned address returned by VirtualAlloc
-        } else {
-            addr &= ~PAGE_MASK;
-        }

         VMAIter vma_handle = CarveVMA(addr, aligned_size);

         VirtualMemoryArea& final_vma = vma_handle->second;
-        final_vma.type = VMAType::Fragmented;
+        final_vma.type = VMAType::Allocated;
+        final_vma.bFragmented = true;
         final_vma.permissions = PAGE_EXECUTE_READWRITE;
         final_vma.backing_block = offset;
@@ -497,81 +560,80 @@ VAddr VMManager::MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VA
         CxbxKrnlCleanup("Unknown error code in Physical Memory class.");
     }

+    *size = aligned_size;
     return addr;
 }

-void VMManager::UnmapRange(VAddr target, bool StackFlag)
+void VMManager::UnmapRange(VAddr target, size_t size)
 {
     VAddr aligned_start = target & ~(UINT_PTR)PAGE_MASK;
-    if (aligned_start == 0) // forbidden page
-    {
-        // This should also generate a STATUS_GUARD_PAGE_VIOLATION exception from Windows
-        CxbxKrnlCleanup("Access to guarded page 0x0 detected!\n");
-    }
+    size_t aligned_size = (size + PAGE_MASK) & ~PAGE_MASK;

     auto it = m_Vma_map.lower_bound(aligned_start);
-    if (it->first != aligned_start)
+    if (aligned_size != 0)
     {
-        CxbxKrnlCleanup("An attempt to deallocate a region not allocated by the manager has been detected!");
-    }
+        /*if (aligned_size == 0) {
+            CxbxKrnlCleanup("An attempt to deallocate a region not allocated by the manager has been detected!");
+        }*/
+
+        VAddr EndingAddress = aligned_start + aligned_size;
+        size_t overlapped_size_start = std::prev(it)->second.base + std::prev(it)->second.size - aligned_start;
+        VirtualMemoryArea start_vma;
+        VirtualMemoryArea end_vma;
+        start_vma.base = aligned_start;
+        start_vma.type = VMAType::Lock;
+        start_vma.size = overlapped_size_start;
+        ResizeVMA(std::prev(it), overlapped_size_start, false);
+        auto low_it = m_Vma_map.emplace(aligned_start, start_vma).first;
+        auto high_pair = m_Vma_map.emplace(EndingAddress, end_vma);
+
+        if (high_pair.second)
+        {
+            size_t overlapped_size_end = EndingAddress - std::prev(high_pair.first)->first;
+            end_vma.base = EndingAddress;
+            end_vma.size = overlapped_size_end;
+            end_vma.type = VMAType::Lock;
+            ResizeVMA(std::prev(high_pair.first), overlapped_size_end, true);
+        }
+        else
+        {
+            end_vma.type = high_pair.first->second.type; // backup the existing vma type
+            high_pair.first->second.type = VMAType::Lock;
+        }
+
+        auto start_it = std::next(low_it); // skip the first locked vma
+        for (; start_it != high_pair.first;)
+        {
+            start_it = DestructVMA(start_it, start_it->second.base, start_it->second.size);
+        }
+
+        if (high_pair.second)
+        {
+            low_it->second.type = VMAType::Free;
+            high_pair.first->second.type = VMAType::Free;
+            MergeAdjacentVMA(std::prev(start_it));
+        }
+        else
+        {
+            low_it->second.type = VMAType::Free;
+            start_it->second.type = end_vma.type; // restore previously saved vma type
+            MergeAdjacentVMA(std::prev(start_it));
+        }
+    }
+    else
+    {
-
-    switch (it->second.type)
-    {
-    case VMAType::Free:
-    case VMAType::MemTiled:
-    case VMAType::IO_DeviceNV2A:
-    case VMAType::MemNV2A_PRAMIN:
-    case VMAType::IO_DeviceAPU:
-    case VMAType::IO_DeviceAC97:
-    case VMAType::IO_DeviceUSB0:
-    case VMAType::IO_DeviceUSB1:
-    case VMAType::IO_DeviceNVNet:
-    case VMAType::DeviceBIOS:
-    case VMAType::DeviceMCPX:
-    {
-        CxbxKrnlCleanup("An attempt to deallocate a region not allocated by the manager has been detected! \
-        The type was %u", it->second.type);
-    }
-    default:
-    {
-        size_t aligned_size = it->second.size;
-        if (StackFlag)
-        {
-            m_StackMemoryInUse -= aligned_size;
-        }
-        else
-        {
-            it->second.permissions & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) ?
-                m_ImageMemoryInUse -= aligned_size : m_NonImageMemoryInUse -= aligned_size;
-        }
-        if (it->second.type == VMAType::Allocated)
-        {
-            DeAllocatePhysicalMemory(it->second.backing_block);
-        }
-        else if (it->second.type == VMAType::Fragmented)
-        {
-            DeAllocateFragmented(it->second.backing_block);
-        }
-        VMAIter vma = CarveVMARange(aligned_start, aligned_size);
-
-        VAddr target_end = aligned_start + aligned_size;
-        VMAIter end = m_Vma_map.end();
-        // The comparison against the end of the range must be done using addresses since vma's can be
-        // merged during this process, causing invalidation of the iterators
-        while (vma != end && vma->second.base < target_end)
-        {
-            vma = std::next(Unmap(vma));
-        }
-    }
-    }
+        if (it->second.type == VMAType::Free || it->first != aligned_start) {
+            CxbxKrnlCleanup("An attempt to deallocate a region not allocated by the manager has been detected!");
+        }
+
+        DestructVMA(it, aligned_start, it->second.size);
+    }
 }

 void VMManager::ReprotectVMARange(VAddr target, size_t size, DWORD new_perms)
 {
     VAddr aligned_start = target & ~(UINT_PTR)PAGE_MASK;
-    size_t aligned_size = ((target + size - 1 + PAGE_SIZE) &
-        ~(UINT_PTR)PAGE_MASK) - aligned_start;
+    size_t aligned_size = (size + PAGE_MASK) & ~PAGE_MASK;

     VMAIter vma = CarveVMARange(aligned_start, aligned_size);
     VAddr target_end = aligned_start + aligned_size;
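The Lock type is used here as a pair of non-mergeable sentinels bracketing the range being unmapped, so merges triggered while vma's are destroyed cannot invalidate iterators outside the range. A stripped-down sketch of that pattern over a plain std::map (illustrative types only):

    #include <map>
    #include <iterator>
    #include <cassert>

    enum class VMAType { Free, Allocated, Lock };

    int main()
    {
        std::map<unsigned, VMAType> vmas = { { 0x0000, VMAType::Allocated },
                                             { 0x3000, VMAType::Allocated },
                                             { 0x7000, VMAType::Allocated } };
        unsigned start = 0x2000, end = 0x6000; // range to unmap

        auto low  = vmas.emplace(start, VMAType::Lock).first; // lower sentinel
        auto high = vmas.emplace(end,   VMAType::Lock).first; // upper sentinel

        // Destroy everything strictly between the sentinels; the sentinels
        // themselves never merge, so 'high' stays valid throughout.
        vmas.erase(std::next(low), high);

        low->second  = VMAType::Free; // release the sentinels...
        high->second = VMAType::Free; // ...and let normal merging resume
        assert(vmas.count(0x3000) == 0 && vmas.count(0x7000) == 1);
        return 0;
    }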
@@ -622,14 +684,6 @@ void VMManager::MapMemoryRegion(VAddr base, size_t size, PAddr target)
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, Present);
 }

-void VMManager::MapSpecialRegion(VAddr base, size_t size, PAddr target)
-{
-    assert((base & PAGE_MASK) == 0); // unaligned address
-    assert((size & PAGE_MASK) == 0); // unaligned size
-
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, Present | OutsideMapped);
-}
-
 void VMManager::UnmapRegion(VAddr base, size_t size)
 {
     assert((base & PAGE_MASK) == 0); // unaligned address
@@ -638,7 +692,6 @@ void VMManager::UnmapRegion(VAddr base, size_t size)
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, NULL, Present ^ Present);
 }

-
 VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle)
 {
     VirtualMemoryArea& vma = vma_handle->second;
@@ -677,10 +730,10 @@ VMManager::VMAIter VMManager::CarveVMA(VAddr base, size_t size)

     VirtualMemoryArea& vma = vma_handle->second;

-    // region is already allocated
-    assert(vma.type == VMAType::Free);
+    // region is not allocated (must be VMAType::Free)
+    assert(vma_handle->second.type == VMAType::Free);

-    u32 start_in_vma = base - vma.base; // VAddr - start addr of vma region found (must be VMAType::Free)
+    u32 start_in_vma = base - vma_handle->second.base; // VAddr - start addr of vma region found
     u32 end_in_vma = start_in_vma + size; // end addr of new vma

     // requested allocation doesn't fit inside vma
@@ -704,12 +757,12 @@ VMManager::VMAIter VMManager::CarveVMARange(VAddr base, size_t size)
 {
     VAddr target_end = base + size;
     assert(target_end >= base);
-    assert(target_end <= MAX_VIRTUAL_ADDRESS - PAGE_SIZE);
+    assert(target_end <= MAX_VIRTUAL_ADDRESS);
     assert(size > 0);

     VMAIter begin_vma = GetVMAIterator(base);
-    VMAIter i_end = m_Vma_map.lower_bound(target_end);
-    for (auto i = begin_vma; i != i_end; ++i)
+    VMAIter it_end = m_Vma_map.lower_bound(target_end);
+    for (auto i = begin_vma; i != it_end; ++i)
     {
         if (i->second.type == VMAType::Free) { assert(0); }
     }
@@ -750,27 +803,27 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma)
     return m_Vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
 }

-VMManager::VMAIter VMManager::MergeAdjacentVMA(VMAIter it)
+VMManager::VMAIter VMManager::MergeAdjacentVMA(VMAIter vma_handle)
 {
-    VMAIter next_vma = std::next(it);
-    if (next_vma != m_Vma_map.end() && it->second.CanBeMergedWith(next_vma->second))
+    VMAIter next_vma = std::next(vma_handle);
+    if (next_vma != m_Vma_map.end() && vma_handle->second.CanBeMergedWith(next_vma->second))
     {
-        it->second.size += next_vma->second.size;
+        vma_handle->second.size += next_vma->second.size;
         m_Vma_map.erase(next_vma);
     }

-    if (it != m_Vma_map.begin())
+    if (vma_handle != m_Vma_map.begin())
     {
-        VMAIter prev_vma = std::prev(it);
-        if (prev_vma->second.CanBeMergedWith(it->second))
+        VMAIter prev_vma = std::prev(vma_handle);
+        if (prev_vma->second.CanBeMergedWith(vma_handle->second))
         {
-            prev_vma->second.size += it->second.size;
-            m_Vma_map.erase(it);
-            it = prev_vma;
+            prev_vma->second.size += vma_handle->second.size;
+            m_Vma_map.erase(vma_handle);
+            vma_handle = prev_vma;
         }
     }

-    return it;
+    return vma_handle;
 }

 VMManager::VMAIter VMManager::ReprotectVMA(VMAIter vma_handle, DWORD new_perms)
@@ -808,18 +861,91 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma)
     break;

     case VMAType::Allocated:
+    case VMAType::Stack:
     {
         MapMemoryRegion(vma.base, vma.size, vma.backing_block);
     }
     break;

-    case VMAType::Fragmented:
-    {
-        MapSpecialRegion(vma.base, vma.size, vma.backing_block);
-    }
-    break;
-
     default:
-        CxbxKrnlCleanup("Unknown VMAType in UpdatePageTableForVMA");
+        CxbxKrnlCleanup("VMAType::Lock or Unknown type in UpdatePageTableForVMA");
     }
 }

+VMManager::VMAIter VMManager::DestructVMA(VMAIter vma_handle, VAddr addr, size_t size)
+{
+    if (vma_handle->second.type == VMAType::Free) { return std::next(vma_handle); }
+
+    if (vma_handle->second.type != VMAType::Stack)
+    {
+        vma_handle->second.permissions & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) ?
+            m_ImageMemoryInUse -= size : m_NonImageMemoryInUse -= size;
+    }
+    else { m_StackMemoryInUse -= size; }
+
+    if (vma_handle->second.type == VMAType::Allocated || vma_handle->second.type == VMAType::Stack)
+    {
+        if (vma_handle->second.bFragmented) { DeAllocateFragmented(vma_handle->second.backing_block); }
+        else { DeAllocatePhysicalMemory(vma_handle->second.backing_block); }
+    }
+
+    VMAIter vma = CarveVMARange(addr, size);
+
+    VAddr target_end = addr + size;
+    VMAIter end = m_Vma_map.end();
+    // The comparison against the end of the range must be done using addresses since vma's can be
+    // merged during this process, causing invalidation of the iterators
+    while (vma != end && vma->second.base < target_end)
+    {
+        vma = std::next(Unmap(vma));
+    }
+
+    return vma;
+}
+
+void VMManager::ResizeVMA(VMAIter vma_handle, size_t offset, bool bStart)
+{
+    if (!offset) { return; } // nothing to do
+
+    VirtualMemoryArea& old_vma = vma_handle->second;
+    VirtualMemoryArea new_vma = old_vma;
+
+    if (bStart)
+    {
+        if (offset > old_vma.size) { return; } // sanity check
+        VAddr new_base = old_vma.base + offset;
+        new_vma.base = new_base;
+        new_vma.size = old_vma.size - offset;
+
+        if (old_vma.type == VMAType::Allocated || old_vma.type == VMAType::Stack) {
+            ShrinkPhysicalAllocation(vma_handle->second.backing_block, offset, vma_handle->second.bFragmented, bStart);
+        }
+        m_Vma_map.erase(old_vma.base);
+        if (new_vma.size) { m_Vma_map.emplace(new_base, new_vma); }
+    }
+    else
+    {
+        if (offset > old_vma.size) { return; } // sanity check
+        VAddr new_base = old_vma.base;
+        new_vma.base = new_base;
+        new_vma.size = old_vma.size - offset;
+
+        if (old_vma.type == VMAType::Allocated || old_vma.type == VMAType::Stack) {
+            ShrinkPhysicalAllocation(vma_handle->second.backing_block, offset, vma_handle->second.bFragmented, bStart);
+        }
+        m_Vma_map.erase(old_vma.base);
+        if (new_vma.size) { m_Vma_map.emplace(new_base, new_vma); }
+    }
+
+    if (new_vma.type != VMAType::Free)
+    {
+        if (new_vma.type != VMAType::Stack)
+        {
+            new_vma.permissions & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) ?
+                m_ImageMemoryInUse -= offset : m_NonImageMemoryInUse -= offset;
+        }
+        else { m_StackMemoryInUse -= offset; }
+    }
+}
@@ -49,8 +49,8 @@ enum class VMAType : u32
     Free,
     // vma represents allocated memory
     Allocated,
-    // vma represents allocated memory mapped outside the second file view (allocated by VirtualAlloc)
-    Fragmented,
     // stack allocation
     Stack,
     // tiled memory
     MemTiled,
     // nv2a
@@ -71,6 +71,8 @@ enum class VMAType : u32
     DeviceBIOS,
     // mcpx rom (retail xbox only)
     DeviceMCPX,
+    // mark this vma as non-mergeable
+    Lock,
 };

@@ -88,6 +90,8 @@ struct VirtualMemoryArea
     DWORD permissions = PAGE_NOACCESS;
     // addr of the memory backing this block, if any
     PAddr backing_block = NULL;
+    // this allocation was served by VirtualAlloc
+    bool bFragmented = false;
     // tests if this area can be merged to the right with 'next'
     bool CanBeMergedWith(const VirtualMemoryArea& next) const;
 };
@@ -104,30 +108,30 @@ class VMManager : public PhysicalMemory
     ~VMManager()
     {
         DeleteCriticalSection(&m_CriticalSection);
-        UnmapViewOfFile((void *)CONTIGUOUS_MEMORY_BASE);
-        UnmapViewOfFile((void*)TILED_MEMORY_BASE);
+        FlushViewOfFile((void*)CONTIGUOUS_MEMORY_BASE, CHIHIRO_MEMORY_SIZE);
+        FlushFileBuffers(m_hAliasedView);
+        UnmapViewOfFile((void *)m_Base);
+        UnmapViewOfFile((void *)CONTIGUOUS_MEMORY_BASE);
+        UnmapViewOfFile((void*)TILED_MEMORY_BASE);
+        CloseHandle(m_hAliasedView);
     }
     // initializes the page table to the default configuration
     void Initialize(HANDLE file_view);
-    // initialize chihiro - specific memory ranges
-    void InitializeChihiro();
+    // initialize chihiro/debug - specific memory ranges
+    void InitializeChihiroDebug();
     // retrieves memory statistics
     void MemoryStatistics(xboxkrnl::PMM_STATISTICS memory_statistics);
     // allocates a block of memory
-    VAddr Allocate(size_t size, PAddr low_addr = 0, PAddr high_addr = MAXULONG_PTR, VAddr addr = NULL, ULONG Alignment = PAGE_SIZE, DWORD protect = PAGE_EXECUTE_READWRITE);
+    VAddr Allocate(size_t size, PAddr low_addr = 0, PAddr high_addr = MAXULONG_PTR, VAddr addr = NULL, ULONG Alignment = PAGE_SIZE,
+        DWORD protect = PAGE_EXECUTE_READWRITE, bool bNonContiguous = true);
     // allocates a block of memory and zeros it
     VAddr AllocateZeroed(size_t size);
     // allocates stack memory
     VAddr AllocateStack(size_t size);
     // deallocate a block of memory
-    void Deallocate(VAddr addr);
+    void Deallocate(VAddr addr, size_t size = 0);
     // deallocate stack memory
     void DeallocateStack(VAddr addr);
-    // checks if an overlapped memory block is present and, if so, deallocates it
-    void DeallocateOverlapped(VAddr addr);
     // changes the protections of a memory region
     void Protect(VAddr target, size_t size, DWORD new_perms);
     // query if a VAddr is valid
@@ -147,6 +151,8 @@ class VMManager : public PhysicalMemory
     std::map<VAddr, VirtualMemoryArea> m_Vma_map;
     // handle of the second file view region
     HANDLE m_hAliasedView = NULL;
+    // start address of the memory region where non-contiguous allocations are mapped in the virtual space
+    VAddr m_Base = 0;
     // critical section lock to synchronize accesses
     CRITICAL_SECTION m_CriticalSection;
     // amount of image virtual memory in use
@@ -157,9 +163,9 @@ class VMManager : public PhysicalMemory
     size_t m_StackMemoryInUse = 0;

     // creates a vma block to be mapped in memory at the specified VAddr, if requested
-    VAddr MapMemoryBlock(size_t size, PAddr low_addr, PAddr high_addr, VAddr addr = NULL, ULONG Alignment = PAGE_SIZE);
+    VAddr MapMemoryBlock(size_t* size, PAddr low_addr, PAddr high_addr, VAddr addr = NULL, ULONG Alignment = PAGE_SIZE, bool bNonContiguous = true);
     // creates a vma representing the memory block to remove
-    void UnmapRange(VAddr target, bool StackFlag = false);
+    void UnmapRange(VAddr target, size_t size = 0);
     // changes access permissions for a range of vma's, splitting them if necessary
     void ReprotectVMARange(VAddr target, size_t size, DWORD new_perms);
     // checks if a VAddr is valid; returns false if not
@@ -168,8 +174,6 @@ class VMManager : public PhysicalMemory
     PAddr TranslateVAddrToPAddr(const VAddr addr);
     // maps a new allocation in the virtual address space
     void MapMemoryRegion(VAddr base, size_t size, PAddr target);
-    // maps a special allocation outside the virtual address space of the second file view
-    void MapSpecialRegion(VAddr base, size_t size, PAddr target);
     // removes an allocation from the virtual address space
     void UnmapRegion(VAddr base, size_t size);
     // removes a vma block from the mapped memory
@@ -185,7 +189,7 @@ class VMManager : public PhysicalMemory
     // splits a parent vma into two children
     VMAIter SplitVMA(VMAIter vma_handle, u32 offset_in_vma);
     // merges the specified vma with adjacent ones if possible
-    VMAIter MergeAdjacentVMA(VMAIter iter);
+    VMAIter MergeAdjacentVMA(VMAIter vma_handle);
     // changes access permissions for a vma
     VMAIter ReprotectVMA(VMAIter vma_handle, DWORD new_perms);
     // updates the page table
@@ -194,6 +198,10 @@ class VMManager : public PhysicalMemory
     void Lock();
     // releases the critical section
     void Unlock();
+    // destructs a vma if not free already
+    VMAIter DestructVMA(VMAIter vma_handle, VAddr addr, size_t size);
+    // changes the size/base of a vma
+    void ResizeVMA(VMAIter vma_handle, size_t offset, bool bStart);
 };