More bug fixes (Xbox-specific code seems to be working fine now. Test: DOA Ultimate)

ergo720 2018-03-17 01:16:26 +01:00
parent 34e27c2611
commit 0bf3ecaf62
6 changed files with 208 additions and 159 deletions


@ -173,6 +173,9 @@ typedef long NTSTATUS;
#define STATUS_OBJECT_NAME_NOT_FOUND ((DWORD )0xC0000034L)
#define STATUS_OBJECT_NAME_COLLISION ((DWORD )0xC0000035L)
#define STATUS_INVALID_PAGE_PROTECTION ((DWORD )0xC0000045L)
#define STATUS_CONFLICTING_ADDRESSES ((DWORD )0xC0000018L)
#define STATUS_UNABLE_TO_FREE_VM ((DWORD )0xC000001AL)
#define STATUS_MEMORY_NOT_ALLOCATED ((DWORD )0xC00000A0L)
// ******************************************************************
// * Registry value types


@ -827,15 +827,6 @@ void CxbxKrnlMain(int argc, char* argv[])
g_VMManager.XbAllocateVirtualMemory(&XbeBase, 0, &ImageSize, XBOX_MEM_RESERVE, XBOX_PAGE_READWRITE);
g_VMManager.XbAllocateVirtualMemory(&XbeBase, 0, &HeaderSize, XBOX_MEM_COMMIT, XBOX_PAGE_READWRITE);
if (XBE_MAX_VA - ImageSize - XBE_IMAGE_BASE > 0)
{
// We also reserve the remaining region up to XBE_MAX_VA since we know it's occupied by our memory
// placeholder and cannot be allocated anyway. This will result in an increase in the reserved memory
VAddr ReservedBase = XBE_IMAGE_BASE + ROUND_UP_4K(ImageSize);
size_t ReservedSize = ROUND_DOWN_4K(XBE_MAX_VA - ImageSize - XBE_IMAGE_BASE);
g_VMManager.XbAllocateVirtualMemory(&ReservedBase, 0, &ReservedSize, XBOX_MEM_RESERVE, XBOX_PAGE_NOACCESS);
}
// Copy over loaded Xbe Headers to specified base address
memcpy((void*)CxbxKrnl_Xbe->m_Header.dwBaseAddr, &CxbxKrnl_Xbe->m_Header, sizeof(Xbe::Header));


@ -144,7 +144,7 @@ void PhysicalMemory::WritePte(PMMPTE pPteStart, PMMPTE pPteEnd, MMPTE Pte, PFN p
// GetPfnOfPT will assert
PMMPTE PointerPte = pPteStart;
XBOX_PFN PTpfn;
PXBOX_PFN PTpfn;
if (bZero)
{
@ -155,7 +155,7 @@ void PhysicalMemory::WritePte(PMMPTE pPteStart, PMMPTE pPteEnd, MMPTE Pte, PFN p
PTpfn = GetPfnOfPT(PointerPte);
}
WRITE_ZERO_PTE(PointerPte);
PTpfn.PTPageFrame.PtesUsed--;
PTpfn->PTPageFrame.PtesUsed--;
PointerPte++;
}
}
@ -171,7 +171,7 @@ void PhysicalMemory::WritePte(PMMPTE pPteStart, PMMPTE pPteEnd, MMPTE Pte, PFN p
{
Pte.Hardware.PFN = pfn;
WRITE_PTE(PointerPte, Pte);
PTpfn.PTPageFrame.PtesUsed++;
PTpfn->PTPageFrame.PtesUsed++;
}
PointerPte++;
pfn++;
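
The XBOX_PFN to PXBOX_PFN change above is more than cosmetic: the old code fetched the pfn entry by value, so the PtesUsed updates modified a stack copy and never reached the pfn database. A minimal sketch of the difference, using a stand-in XBOX_PFN type (the real one is a union defined in the Cxbx-Reloaded memory headers):

#include <cassert>

// Stand-in for the real XBOX_PFN union from the Cxbx-Reloaded headers
struct XBOX_PFN { struct { unsigned PtesUsed; } PTPageFrame; };
typedef XBOX_PFN* PXBOX_PFN;

static XBOX_PFN PfnDatabase[1] = {}; // fake one-entry pfn database

int main()
{
    XBOX_PFN Copy = PfnDatabase[0]; // old pattern: by-value copy
    Copy.PTPageFrame.PtesUsed++;    // updates the temporary only
    assert(PfnDatabase[0].PTPageFrame.PtesUsed == 0);

    PXBOX_PFN PTpfn = &PfnDatabase[0]; // new pattern: pointer into the database
    PTpfn->PTPageFrame.PtesUsed++;     // the bookkeeping actually sticks
    assert(PfnDatabase[0].PTPageFrame.PtesUsed == 1);
    return 0;
}
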
@ -556,15 +556,23 @@ DWORD PhysicalMemory::PatchXboxPermissions(DWORD Perms)
switch (Perms & (XBOX_PAGE_READONLY | XBOX_PAGE_READWRITE))
{
case 0:
{
// One of XBOX_PAGE_READONLY or XBOX_PAGE_READWRITE must be specified
DbgPrintf("PMEM: PatchXboxPermissions: Memory permissions bug detected\n");
return XBOX_PAGE_EXECUTE_READWRITE;
}
case XBOX_PAGE_READONLY:
{
Perms |= (~XBOX_PAGE_READONLY);
Perms &= (~XBOX_PAGE_READONLY);
return Perms | XBOX_PAGE_EXECUTE_READ;
}
case XBOX_PAGE_READWRITE:
{
Perms |= (~XBOX_PAGE_READWRITE);
Perms &= (~XBOX_PAGE_READWRITE);
return Perms | XBOX_PAGE_EXECUTE_READWRITE;
}
@ -574,7 +582,7 @@ DWORD PhysicalMemory::PatchXboxPermissions(DWORD Perms)
// input is probably invalid
DbgPrintf("PMEM: PatchXboxPermissions: Memory permissions bug detected\n");
return Perms | XBOX_PAGE_EXECUTE_READWRITE;
return XBOX_PAGE_EXECUTE_READWRITE;
}
}
}
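
The permission-patching fix above swaps |= for &=: OR-ing a mask with the complement of a flag sets every other bit instead of clearing the flag. A quick demonstration (the constants are illustrative, not the real XDK values):

#include <cstdio>

constexpr unsigned XBOX_PAGE_READONLY = 0x002; // illustrative value
constexpr unsigned XBOX_PAGE_NOCACHE  = 0x200; // illustrative value

int main()
{
    unsigned Perms = XBOX_PAGE_READONLY | XBOX_PAGE_NOCACHE;

    unsigned Buggy = Perms;
    Buggy |= ~XBOX_PAGE_READONLY;  // sets every bit: 0xFFFFFFFF here
    unsigned Fixed = Perms;
    Fixed &= ~XBOX_PAGE_READONLY;  // clears just the READONLY flag

    printf("buggy: 0x%08X  fixed: 0x%08X\n", Buggy, Fixed); // 0xFFFFFFFF vs 0x00000200
    return 0;
}
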
@ -613,6 +621,9 @@ DWORD PhysicalMemory::ConvertXboxToWinProtection(DWORD Perms)
switch (Perms & (XBOX_PAGE_GUARD | XBOX_PAGE_NOCACHE | XBOX_PAGE_WRITECOMBINE))
{
case 0:
break;
case XBOX_PAGE_GUARD:
{
Mask |= PAGE_GUARD;
@ -669,7 +680,6 @@ bool PhysicalMemory::AllocatePT(PFN_COUNT PteNumber, VAddr addr)
PFN_COUNT PdeNumber = ROUND_UP(PteNumber, PTE_PER_PAGE) / PTE_PER_PAGE;
PFN_COUNT PTtoCommit = 0;
PageType BusyType = SystemPageTableType;
int PdeMappedSizeIncrement = 0;
VAddr StartingAddr = addr;
assert(PteNumber);
@ -677,11 +687,11 @@ bool PhysicalMemory::AllocatePT(PFN_COUNT PteNumber, VAddr addr)
for (unsigned int i = 0; i < PdeNumber; ++i)
{
if (GetPdeAddress(StartingAddr += PdeMappedSizeIncrement)->Hardware.Valid == 0)
if (GetPdeAddress(StartingAddr)->Hardware.Valid == 0)
{
PTtoCommit++;
}
PdeMappedSizeIncrement += (4 * ONE_MB);
StartingAddr += (4 * ONE_MB);
}
if (!PTtoCommit)
@ -698,7 +708,6 @@ bool PhysicalMemory::AllocatePT(PFN_COUNT PteNumber, VAddr addr)
return false;
}
if (addr <= HIGHEST_USER_ADDRESS) { BusyType = VirtualPageTableType; }
PdeMappedSizeIncrement = 0;
StartingAddr = addr;
// Now actually commit the PT's. Note that we won't construct the vma's for the PTs since they are outside of all
@ -706,7 +715,7 @@ bool PhysicalMemory::AllocatePT(PFN_COUNT PteNumber, VAddr addr)
for (unsigned int i = 0; i < PdeNumber; ++i)
{
pPde = GetPdeAddress(StartingAddr += PdeMappedSizeIncrement);
pPde = GetPdeAddress(StartingAddr);
if (pPde->Hardware.Valid == 0)
{
// We grab one page at a time to avoid fragmentation issues. The maximum allowed page is m_MaxContiguousPfn
@ -718,7 +727,7 @@ bool PhysicalMemory::AllocatePT(PFN_COUNT PteNumber, VAddr addr)
WRITE_PTE(pPde, TempPte);
WritePfn(pfn, pfn, pPde, BusyType);
}
PdeMappedSizeIncrement += (4 * ONE_MB);
StartingAddr += (4 * ONE_MB);
}
return true;
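
Dropping PdeMappedSizeIncrement fixes a stride bug: the old loop added an increment that itself grew by 4 MiB each iteration, so the walked addresses advanced quadratically (0, 4, 12, 24 MiB, ...) instead of stepping one 4 MiB pde slot at a time. A small sketch of both walks:

#include <cstdio>

constexpr unsigned ONE_MB = 0x100000;

int main()
{
    unsigned OldAddr = 0, NewAddr = 0;
    int PdeMappedSizeIncrement = 0;

    for (int i = 0; i < 4; ++i) {
        OldAddr += PdeMappedSizeIncrement;  // old: visits 0, 4, 12, 24 MiB
        printf("pde %d -> old 0x%08X, new 0x%08X\n", i, OldAddr, NewAddr);
        PdeMappedSizeIncrement += 4 * ONE_MB;
        NewAddr += 4 * ONE_MB;              // new: one pde (4 MiB) per step
    }
    return 0;
}
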
@ -727,9 +736,8 @@ bool PhysicalMemory::AllocatePT(PFN_COUNT PteNumber, VAddr addr)
void PhysicalMemory::DeallocatePT(PFN_COUNT PteNumber, VAddr addr)
{
PMMPTE pPde;
XBOX_PFN PTpfn;
PXBOX_PFN PTpfn;
PFN_COUNT PdeNumber = ROUND_UP(PteNumber, PTE_PER_PAGE) / PTE_PER_PAGE;
int PdeMappedSizeIncrement = 0;
VAddr StartingAddr = addr;
assert(PteNumber);
@ -737,22 +745,16 @@ void PhysicalMemory::DeallocatePT(PFN_COUNT PteNumber, VAddr addr)
for (unsigned int i = 0; i < PdeNumber; ++i)
{
pPde = GetPdeAddress(StartingAddr += PdeMappedSizeIncrement);
assert(pPde->Hardware.Valid != 0); // pde must be valid at this point
if (g_bIsRetail || g_bIsDebug) {
PTpfn = *XBOX_PFN_ELEMENT(pPde->Hardware.PFN);
}
else { PTpfn = *CHIHIRO_PFN_ELEMENT(pPde->Hardware.PFN); }
assert(PTpfn.PTPageFrame.BusyType == SystemPageTableType ||
PTpfn.PTPageFrame.BusyType == VirtualPageTableType);
pPde = GetPdeAddress(StartingAddr);
PTpfn = GetPfnOfPT(GetPteAddress(StartingAddr));
if (PTpfn.PTPageFrame.PtesUsed == 0)
if (PTpfn->PTPageFrame.PtesUsed == 0)
{
InsertFree(pPde->Hardware.PFN, pPde->Hardware.PFN);
WritePfn(pPde->Hardware.PFN, pPde->Hardware.PFN, pPde, (PageType)PTpfn.PTPageFrame.BusyType, true);
WritePfn(pPde->Hardware.PFN, pPde->Hardware.PFN, pPde, (PageType)PTpfn->PTPageFrame.BusyType, true);
WRITE_ZERO_PTE(pPde);
}
PdeMappedSizeIncrement += (4 * ONE_MB);
StartingAddr += (4 * ONE_MB);
}
}
@ -766,21 +768,21 @@ bool PhysicalMemory::IsMappable(PFN_COUNT PagesRequested, bool bRetailRegion, bo
return ret;
}
XBOX_PFN PhysicalMemory::GetPfnOfPT(PMMPTE pPte)
PXBOX_PFN PhysicalMemory::GetPfnOfPT(PMMPTE pPte)
{
XBOX_PFN PTpfn;
PXBOX_PFN PTpfn;
// GetPteAddress on a pte address will yield the corresponding pde which maps the supplied pte
PMMPTE PointerPde = GetPteAddress(pPte);
// PointerPde should have already been written to by AllocatePT
assert(PointerPde->Hardware.Valid != 0);
if (g_bIsRetail || g_bIsDebug) {
PTpfn = *XBOX_PFN_ELEMENT(PointerPde->Hardware.PFN);
PTpfn = XBOX_PFN_ELEMENT(PointerPde->Hardware.PFN);
}
else { PTpfn = *CHIHIRO_PFN_ELEMENT(PointerPde->Hardware.PFN); }
assert(PTpfn.PTPageFrame.Busy == 1);
assert(PTpfn.PTPageFrame.BusyType == SystemPageTableType ||
PTpfn.PTPageFrame.BusyType == VirtualPageTableType);
else { PTpfn = CHIHIRO_PFN_ELEMENT(PointerPde->Hardware.PFN); }
assert(PTpfn->PTPageFrame.Busy == 1);
assert(PTpfn->PTPageFrame.BusyType == SystemPageTableType ||
PTpfn->PTPageFrame.BusyType == VirtualPageTableType);
return PTpfn;
}
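
GetPfnOfPT leans on the x86 recursive page-table mapping noted in the comment: running an address through GetPteAddress yields its pte, and running the pte's own address through it again lands on the pde that maps that pte. A sketch of the arithmetic, assuming a 32-bit address space and a hypothetical self-map window at 0xC0000000 (pure address math, nothing is dereferenced):

#include <cstdint>
#include <cstdio>

constexpr uint32_t PAGE_TABLES_BASE = 0xC0000000; // hypothetical self-map base
constexpr uint32_t PAGE_SHIFT = 12;

// One 4-byte pte per 4 KiB page, laid out inside the self-map window
static uint32_t GetPteAddress(uint32_t addr)
{
    return ((addr >> PAGE_SHIFT) << 2) + PAGE_TABLES_BASE;
}

int main()
{
    uint32_t va  = 0x00341000;
    uint32_t pte = GetPteAddress(va);  // pte that maps va
    uint32_t pde = GetPteAddress(pte); // pde that maps that pte
    printf("va 0x%08X -> pte 0x%08X -> pde 0x%08X\n", va, pte, pde);
    return 0;
}
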


@ -246,7 +246,7 @@ class PhysicalMemory
// write a contiguous range of pte's
void WritePte(PMMPTE pPteStart, PMMPTE pPteEnd, MMPTE Pte, PFN pfn, bool bZero = false);
// retrieves the pfn entry which maps a PT
XBOX_PFN GetPfnOfPT(PMMPTE pPte);
PXBOX_PFN GetPfnOfPT(PMMPTE pPte);
// commit a contiguous number of pages
bool RemoveFree(PFN_COUNT NumberOfPages, PFN* result, PFN_COUNT PfnAlignment, PFN start, PFN end);
// release a contiguous number of pages
@ -265,7 +265,7 @@ class PhysicalMemory
DWORD PatchXboxPermissions(DWORD Perms);
// commit page tables (if necessary)
bool AllocatePT(PFN_COUNT PteNumber, VAddr addr);
// deallocate a PT if possible
// deallocate page tables (if possible)
void DeallocatePT(PFN_COUNT PteNumber, VAddr addr);
// checks if enough free pages are available for the allocation (doesn't account for fragmentation)
bool IsMappable(PFN_COUNT PagesRequested, bool bRetailRegion, bool bDebugRegion);


@ -130,6 +130,18 @@ void VMManager::Initialize(HANDLE memory_view, HANDLE pagetables_view, bool bRes
// Construct the page directory
InitializePageDirectory();
// We also reserve the remaining region, from the end of the reserved xbe image memory up to XBE_MAX_VA, since we know
// it's occupied by our memory placeholder and cannot be allocated anyway. We cannot just call XbAllocateVirtualMemory:
// that would increase the reserved memory usage and, with XBOX_MEM_RESERVE, the starting address would be rounded down
// to a 64K boundary, which would likely result in an incorrect base address
if (XBE_MAX_VA - CxbxKrnl_Xbe->m_Header.dwSizeofImage - XBE_IMAGE_BASE > 0)
{
ConstructVMA(XBE_IMAGE_BASE + ROUND_UP_4K(CxbxKrnl_Xbe->m_Header.dwSizeofImage),
ROUND_DOWN_4K(XBE_MAX_VA - CxbxKrnl_Xbe->m_Header.dwSizeofImage - XBE_IMAGE_BASE), UserRegion, ReservedVma,
false);
}
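
A sketch of the range that ends up reserved here, with illustrative values for XBE_IMAGE_BASE and XBE_MAX_VA and the usual 4K rounding helpers (the real constants live in the Cxbx-Reloaded headers):

#include <cstdio>

constexpr unsigned PAGE_MASK      = 0xFFF;
constexpr unsigned XBE_IMAGE_BASE = 0x00010000; // illustrative
constexpr unsigned XBE_MAX_VA     = 0x04000000; // illustrative, 64 MiB

static unsigned RoundUp4K(unsigned x)   { return (x + PAGE_MASK) & ~PAGE_MASK; }
static unsigned RoundDown4K(unsigned x) { return x & ~PAGE_MASK; }

int main()
{
    unsigned SizeOfImage = 0x123456; // hypothetical dwSizeofImage

    if (XBE_MAX_VA - SizeOfImage - XBE_IMAGE_BASE > 0) {
        unsigned Base = XBE_IMAGE_BASE + RoundUp4K(SizeOfImage);
        unsigned Size = RoundDown4K(XBE_MAX_VA - SizeOfImage - XBE_IMAGE_BASE);
        printf("placeholder vma: 0x%08X - 0x%08X\n", Base, Base + Size - 1);
    }
    return 0;
}
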
if (g_bIsChihiro) {
printf(LOG_PREFIX "Page table for Chihiro arcade initialized!\n");
@ -204,7 +216,7 @@ void VMManager::InitializePfnDatabase()
// Construct the pfn's of the kernel pages
AllocateContiguous(KERNEL_SIZE, XBE_IMAGE_BASE, XBE_IMAGE_BASE + ROUND_UP_4K(KERNEL_SIZE) - 1, 0, XBOX_PAGE_EXECUTE_READWRITE);
AllocateContiguous(KERNEL_SIZE, XBE_IMAGE_BASE, XBE_IMAGE_BASE + ROUND_UP_4K(KERNEL_SIZE) - 1, 0, XBOX_PAGE_READWRITE);
PersistMemory(CONTIGUOUS_MEMORY_BASE + XBE_IMAGE_BASE, KERNEL_SIZE, true);
@ -219,24 +231,18 @@ void VMManager::InitializePfnDatabase()
if (g_bIsRetail) {
pfn = XBOX_PFN_DATABASE_PHYSICAL_PAGE;
pfn_end = XBOX_PFN_DATABASE_PHYSICAL_PAGE + 16 - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
PointerPte = GetPteAddress(addr);
EndingPte = GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end));
}
else if (g_bIsDebug) {
pfn = XBOX_PFN_DATABASE_PHYSICAL_PAGE;
pfn_end = XBOX_PFN_DATABASE_PHYSICAL_PAGE + 32 - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
PointerPte = GetPteAddress(addr);
EndingPte = GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end));
}
else {
pfn = CHIHIRO_PFN_DATABASE_PHYSICAL_PAGE;
pfn_end = CHIHIRO_PFN_DATABASE_PHYSICAL_PAGE + 32 - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
PointerPte = GetPteAddress(addr);
EndingPte = GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end));
}
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
PointerPte = GetPteAddress(addr);
EndingPte = GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end));
TempPte.Default = ValidKernelPteBits | PTE_PERSIST_MASK;
RemoveFree(pfn_end - pfn + 1, &result, 0, pfn, pfn_end);
@ -251,17 +257,14 @@ void VMManager::InitializePfnDatabase()
if (g_bIsRetail || g_bIsDebug) {
pfn = XBOX_INSTANCE_PHYSICAL_PAGE;
pfn_end = XBOX_INSTANCE_PHYSICAL_PAGE + NV2A_INSTANCE_PAGE_COUNT - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
PointerPte = GetPteAddress(addr);
EndingPte = GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end));
}
else {
pfn = CHIHIRO_INSTANCE_PHYSICAL_PAGE;
pfn_end = CHIHIRO_INSTANCE_PHYSICAL_PAGE + NV2A_INSTANCE_PAGE_COUNT - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
PointerPte = GetPteAddress(addr);
EndingPte = GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end));
}
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
PointerPte = GetPteAddress(addr);
EndingPte = GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end));
TempPte.Default = ValidKernelPteBits;
DISABLE_CACHING(TempPte);
@ -292,89 +295,90 @@ void VMManager::InitializePfnDatabase()
void VMManager::ReinitializePfnDatabase()
{
// With a quick reboot the previous pfn's of the persistent allocations are carried over and can be reused, we just
// need to call AllocatePT and construct the vma's for them and we are done
// With a quick reboot the initialization is simplified since the previous pte's of the persistent allocations are carried over and
// can be reused
PFN pfn;
PFN pfn_end;
VAddr addr;
// Construct the pfn of the page used by D3D
pfn = D3D_PHYSICAL_PAGE >> PAGE_SHIFT;
pfn_end = pfn;
addr = CONTIGUOUS_MEMORY_BASE + D3D_PHYSICAL_PAGE;
AllocatePT(pfn_end - pfn + 1, addr);
ConstructVMA(addr, (pfn_end - pfn + 1) << PAGE_SHIFT, ContiguousRegion, AllocatedVma, false);
// Update the allocation of the page used by D3D
RestorePersistentAllocation(CONTIGUOUS_MEMORY_BASE, D3D_PHYSICAL_PAGE, D3D_PHYSICAL_PAGE, ContiguousType);
// Construct the pfn of the page directory
pfn = PAGE_DIRECTORY_PHYSICAL_ADDRESS >> PAGE_SHIFT;
pfn_end = pfn;
addr = CONTIGUOUS_MEMORY_BASE + PAGE_DIRECTORY_PHYSICAL_ADDRESS;
AllocatePT(pfn_end - pfn + 1, addr);
ConstructVMA(addr, (pfn_end - pfn + 1) << PAGE_SHIFT, ContiguousRegion, AllocatedVma, false);
// Update the allocation of the page directory
RestorePersistentAllocation(CONTIGUOUS_MEMORY_BASE + PAGE_DIRECTORY_PHYSICAL_ADDRESS, PAGE_DIRECTORY_PHYSICAL_ADDRESS >> PAGE_SHIFT,
PAGE_DIRECTORY_PHYSICAL_ADDRESS >> PAGE_SHIFT, ContiguousType);
// Construct the pfn's of the kernel pages
pfn = XBE_IMAGE_BASE >> PAGE_SHIFT;
pfn_end = pfn;
addr = CONTIGUOUS_MEMORY_BASE + XBE_IMAGE_BASE;
AllocatePT(pfn_end - pfn + 1, addr);
ConstructVMA(addr, (pfn_end - pfn + 1) << PAGE_SHIFT, ContiguousRegion, AllocatedVma, false);
// Update the allocation of the kernel
RestorePersistentAllocation(CONTIGUOUS_MEMORY_BASE + XBE_IMAGE_BASE, XBE_IMAGE_BASE >> PAGE_SHIFT,
((XBE_IMAGE_BASE + ROUND_UP_4K(KERNEL_SIZE)) >> PAGE_SHIFT) - 1, ContiguousType);
// Construct the pfn's of the pages holding the pfn database
// Update the allocation of the pfn database
if (g_bIsRetail) {
pfn = XBOX_PFN_DATABASE_PHYSICAL_PAGE;
pfn_end = XBOX_PFN_DATABASE_PHYSICAL_PAGE + 16 - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
}
else if (g_bIsDebug) {
pfn = XBOX_PFN_DATABASE_PHYSICAL_PAGE;
pfn_end = XBOX_PFN_DATABASE_PHYSICAL_PAGE + 32 - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
}
else {
pfn = CHIHIRO_PFN_DATABASE_PHYSICAL_PAGE;
pfn_end = CHIHIRO_PFN_DATABASE_PHYSICAL_PAGE + 32 - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
}
AllocatePT(pfn_end - pfn + 1, addr);
ConstructVMA(addr, (pfn_end - pfn + 1) << PAGE_SHIFT, ContiguousRegion, AllocatedVma, false);
RestorePersistentAllocation((VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn), pfn, pfn_end, UnknownType);
if (g_bIsDebug) { m_PhysicalPagesAvailable += 16; m_DebuggerPagesAvailable -= 16; }
// Construct the pfn's of the pages holding the nv2a instance memory
if (g_bIsRetail || g_bIsDebug) {
pfn = XBOX_INSTANCE_PHYSICAL_PAGE;
pfn_end = XBOX_INSTANCE_PHYSICAL_PAGE + NV2A_INSTANCE_PAGE_COUNT - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
}
else {
pfn = CHIHIRO_INSTANCE_PHYSICAL_PAGE;
pfn_end = CHIHIRO_INSTANCE_PHYSICAL_PAGE + NV2A_INSTANCE_PAGE_COUNT - 1;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
}
AllocatePT(pfn_end - pfn + 1, addr);
ConstructVMA(addr, NV2A_INSTANCE_PAGE_COUNT << PAGE_SHIFT, ContiguousRegion, AllocatedVma, false);
if (g_bIsDebug)
{
// Debug kits have two nv2a instance memory regions, the second at the top of the 128 MiB
PFN result;
MMPTE TempPte;
VAddr addr;
pfn += DEBUGKIT_FIRST_UPPER_HALF_PAGE;
pfn_end += DEBUGKIT_FIRST_UPPER_HALF_PAGE;
// Re-construct the allocation of the nv2a instance memory
if (g_bIsRetail || g_bIsDebug) {
pfn = XBOX_INSTANCE_PHYSICAL_PAGE;
pfn_end = XBOX_INSTANCE_PHYSICAL_PAGE + NV2A_INSTANCE_PAGE_COUNT - 1;
}
else {
pfn = CHIHIRO_INSTANCE_PHYSICAL_PAGE;
pfn_end = CHIHIRO_INSTANCE_PHYSICAL_PAGE + NV2A_INSTANCE_PAGE_COUNT - 1;
}
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
TempPte.Default = ValidKernelPteBits;
DISABLE_CACHING(TempPte);
AllocatePT(pfn_end - pfn + 1, addr);
ConstructVMA(addr, NV2A_INSTANCE_PAGE_COUNT << PAGE_SHIFT, ContiguousRegion, AllocatedVma, false);
RemoveFree(pfn_end - pfn + 1, &result, 0, pfn, pfn_end);
RestorePersistentAllocation(addr, pfn, pfn_end, ContiguousType);
WritePte(GetPteAddress(addr), GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end)), TempPte, pfn);
if (g_bIsDebug)
{
// Debug kits have two nv2a instance memory regions, the second at the top of the 128 MiB
pfn += DEBUGKIT_FIRST_UPPER_HALF_PAGE;
pfn_end += DEBUGKIT_FIRST_UPPER_HALF_PAGE;
addr = (VAddr)CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn);
RemoveFree(pfn_end - pfn + 1, &result, 0, pfn, pfn_end);
RestorePersistentAllocation(addr, pfn, pfn_end, ContiguousType);
WritePte(GetPteAddress(addr), GetPteAddress(CONVERT_PFN_TO_CONTIGUOUS_PHYSICAL(pfn_end)), TempPte, pfn);
}
}
}
void VMManager::RestorePersistentAllocation(VAddr addr, PFN StartingPfn, PFN EndingPfn, PageType Type)
{
AllocatePT(EndingPfn - StartingPfn + 1, addr);
ConstructVMA(addr, (EndingPfn - StartingPfn + 1) << PAGE_SHIFT, ContiguousRegion, AllocatedVma, false);
WritePfn(StartingPfn, EndingPfn, GetPteAddress(addr), Type);
GetPfnOfPT(GetPteAddress(addr))->PTPageFrame.PtesUsed += (EndingPfn - StartingPfn + 1);
}
void VMManager::ConstructVMA(VAddr Start, size_t Size, MemoryRegionType Type, VMAType VmaType, bool bFragFlag, DWORD Perms)
{
VMAIter it_begin = m_MemoryRegionArray[Type].RegionMap.begin();
@ -387,8 +391,8 @@ void VMManager::ConstructVMA(VAddr Start, size_t Size, MemoryRegionType Type, VM
vma.permissions = Perms;
vma.bFragmented = bFragFlag;
// Depending on the splitting done by CarveVMA, there is no guarantee that the next or previous vma's are free.
// We are just going to iterate forward and backward until we find one.
// Depending on the splitting done by CarveVMA and the type of the adjacent vma's, there is no guarantee that the next
// or previous vma's are free. We are just going to iterate forward and backward until we find one.
it = std::next(vma_handle);
@ -490,7 +494,8 @@ void VMManager::MemoryStatistics(xboxkrnl::PMM_STATISTICS memory_statistics)
Lock();
memory_statistics->TotalPhysicalPages = g_SystemMaxMemory >> PAGE_SHIFT;
memory_statistics->AvailablePages = m_PhysicalPagesAvailable;
memory_statistics->AvailablePages = g_bIsDebug && m_bAllowNonDebuggerOnTop64MiB ?
m_PhysicalPagesAvailable + m_DebuggerPagesAvailable : m_PhysicalPagesAvailable;
memory_statistics->VirtualMemoryBytesCommitted = (m_PagesByUsage[VirtualMemoryType] +
m_PagesByUsage[ImageType]) << PAGE_SHIFT;
memory_statistics->VirtualMemoryBytesReserved = m_VirtualMemoryBytesReserved;
@ -660,6 +665,21 @@ void VMManager::RestorePersistentMemory()
PointerPte++;
}
// Zero all the remaining pte's
EndingPte += 1;
FillMemoryUlong((void*)PAGE_TABLES_BASE, (VAddr)GetPteAddress(CONTIGUOUS_MEMORY_BASE) - PAGE_TABLES_BASE, 0);
FillMemoryUlong((void*)EndingPte, PAGE_TABLES_END + 1 - (VAddr)EndingPte, 0);
// Zero all the entries of the PFN database
if (g_bIsRetail) {
FillMemoryUlong((void*)XBOX_PFN_ADDRESS, X64KB, 0); // Xbox: 64 KiB
}
else if (g_bIsChihiro) {
FillMemoryUlong((void*)CHIHIRO_PFN_ADDRESS, X64KB * 2, 0); // Chihiro: 128 KiB
}
else {
FillMemoryUlong((void*)XBOX_PFN_ADDRESS, X64KB * 2, 0); // Debug: 128 KiB
}
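
FillMemoryUlong is used here as a fast 32-bit pattern fill; a minimal stand-in with the same shape (destination, byte count, ULONG pattern), assuming the length is ULONG-aligned:

#include <cstdint>
#include <cstdio>

// Minimal stand-in: write `Pattern` into `Length` bytes starting at `Destination`
static void FillMemoryUlong(void* Destination, size_t Length, uint32_t Pattern)
{
    uint32_t* p = static_cast<uint32_t*>(Destination);
    for (size_t i = 0; i < Length / sizeof(uint32_t); ++i) {
        p[i] = Pattern;
    }
}

int main()
{
    uint32_t FakePfnDatabase[16] = { 1, 2, 3 };
    FillMemoryUlong(FakePfnDatabase, sizeof(FakePfnDatabase), 0); // zero every entry
    printf("first entry after fill: %u\n", FakePfnDatabase[0]);   // prints 0
    return 0;
}
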
// Now we need to restore the launch data page and the frame buffer pointers to their correct values
@ -670,12 +690,20 @@ void VMManager::RestorePersistentMemory()
if (LauchDataAddress != 0 && IS_PHYSICAL_ADDRESS(LauchDataAddress)) {
xboxkrnl::LaunchDataPage = (xboxkrnl::PLAUNCH_DATA_PAGE)LauchDataAddress;
*(VAddr*)(CONTIGUOUS_MEMORY_BASE + PAGE_SIZE - 4) = NULL;
RestorePersistentAllocation(LauchDataAddress, GetPteAddress(LauchDataAddress)->Hardware.PFN,
GetPteAddress(LauchDataAddress)->Hardware.PFN, ContiguousType);
DbgPrintf("VMEM: Restored LaunchDataPage\n");
}
if (FrameBufferAddress != 0 && IS_PHYSICAL_ADDRESS(FrameBufferAddress)) {
xboxkrnl::AvSavedDataAddress = (xboxkrnl::PVOID)FrameBufferAddress;
*(VAddr*)(CONTIGUOUS_MEMORY_BASE + PAGE_SIZE - 8) = NULL;
RestorePersistentAllocation(FrameBufferAddress, GetPteAddress(FrameBufferAddress)->Hardware.PFN,
GetPteAddress(FrameBufferAddress)->Hardware.PFN + (QuerySize(FrameBufferAddress) >> PAGE_SHIFT) - 1, ContiguousType);
DbgPrintf("VMEM: Restored FrameBuffer\n");
}
}
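
The two addresses restored here come from the last 8 bytes of the first contiguous page, where they were stashed before the quick reboot. A sketch of that convention, using a local buffer in place of the real contiguous region and hypothetical addresses:

#include <cstdint>
#include <cstdio>

constexpr unsigned PAGE_SIZE = 0x1000;
alignas(4096) static uint8_t FirstContiguousPage[PAGE_SIZE]; // stand-in region

static uint32_t& Slot(unsigned BytesFromEnd)
{
    return *reinterpret_cast<uint32_t*>(FirstContiguousPage + PAGE_SIZE - BytesFromEnd);
}

int main()
{
    // Before rebooting, the two pointers are stashed at the end of the page
    Slot(4) = 0x80012000; // hypothetical LaunchDataPage address
    Slot(8) = 0x83FC0000; // hypothetical frame buffer address

    // On the quick-reboot path they are read back and the slots are cleared
    uint32_t LaunchData = Slot(4), FrameBuffer = Slot(8);
    Slot(4) = 0; Slot(8) = 0;
    printf("launch data 0x%08X, frame buffer 0x%08X\n", LaunchData, FrameBuffer);
    return 0;
}
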
@ -707,7 +735,7 @@ VAddr VMManager::Allocate(size_t Size)
if (!IsMappable(PteNumber, true, g_bIsDebug && m_bAllowNonDebuggerOnTop64MiB ? true : false)) { goto Fail; }
ConvertXboxToSystemPteProtection(XBOX_PAGE_EXECUTE_READWRITE, &TempPte);
ConvertXboxToPteProtection(XBOX_PAGE_EXECUTE_READWRITE, &TempPte);
if (RemoveFree(PteNumber, &pfn, 0, 0, g_bIsDebug && !m_bAllowNonDebuggerOnTop64MiB ? XBOX_HIGHEST_PHYSICAL_PAGE
: m_HighestPage)) // MapViewOfFileEx path
@ -836,26 +864,21 @@ VAddr VMManager::AllocateSystemMemory(PageType BusyType, DWORD Perms, size_t Siz
// Debugger pages are only allocated from the extra 64 MiB available on devkits and are mapped in the
// devkit system region
if (!IsMappable(PagesNumber, false, true)) { goto Fail; }
if (!IsMappable(PteNumber, false, true)) { goto Fail; }
LowestAcceptablePfn = DEBUGKIT_FIRST_UPPER_HALF_PAGE;
HighestAcceptablePfn = m_HighestPage;
MemoryType = DevkitRegion;
}
else { if (!IsMappable(PagesNumber, true, false)) { goto Fail; } }
else { if (!IsMappable(PteNumber, true, false)) { goto Fail; } }
// ergo720: if a guard page is requested, then because we only commit the actual stack pages, there is a remote
// possibility that the stack is mapped immediately after the end of a host allocation, since those appear as free
// areas the VMManager can't know anything about. In practice this shouldn't matter: if emulation is done properly,
// a game will never write beyond the stack bottom, since that would imply a stack overflow
if (RemoveFree(PagesNumber, &pfn, 0, LowestAcceptablePfn, HighestAcceptablePfn)) // MapViewOfFileEx path
if (RemoveFree(PteNumber, &pfn, 0, LowestAcceptablePfn, HighestAcceptablePfn)) // MapViewOfFileEx path
{
MappingRoutine = &VMManager::MapBlockWithMapViewOfFileEx;
addr = MapMemoryBlock(MappingRoutine, MemoryType, PagesNumber, pfn);
addr = MapMemoryBlock(MappingRoutine, MemoryType, PteNumber, pfn);
if (!addr)
{
InsertFree(pfn, pfn + PagesNumber - 1);
InsertFree(pfn, pfn + PteNumber - 1);
goto Fail;
}
}
@ -868,7 +891,7 @@ VAddr VMManager::AllocateSystemMemory(PageType BusyType, DWORD Perms, size_t Siz
bVAlloc = true;
MappingRoutine = &VMManager::MapBlockWithVirtualAlloc;
addr = MapMemoryBlock(MappingRoutine, MemoryType, PagesNumber, 0);
addr = MapMemoryBlock(MappingRoutine, MemoryType, PteNumber, 0);
if (!addr) { goto Fail; }
}
@ -887,7 +910,7 @@ VAddr VMManager::AllocateSystemMemory(PageType BusyType, DWORD Perms, size_t Siz
{
// Recalculate the granularity aligned addr
UnmapViewOfFile((void*)ROUND_DOWN(addr, m_AllocationGranularity));
InsertFree(pfn, pfn + PagesNumber - 1);
InsertFree(pfn, pfn + PteNumber - 1);
}
goto Fail;
}
@ -899,16 +922,13 @@ VAddr VMManager::AllocateSystemMemory(PageType BusyType, DWORD Perms, size_t Siz
// Also increment by one the number of pte's used for the guard page. Note that we can't simply call WritePte
// with bZero set because that will decrease the number of pte's used
XBOX_PFN PTpfn = GetPfnOfPT(PointerPte);
PTpfn.PTPageFrame.PtesUsed++;
PXBOX_PFN PTpfn = GetPfnOfPT(PointerPte);
PTpfn->PTPageFrame.PtesUsed++;
WRITE_ZERO_PTE(PointerPte);
PointerPte++;
}
EndingPte = PointerPte + PagesNumber - 1;
WritePte(PointerPte, EndingPte, TempPte, pfn);
EndingPte->Hardware.GuardOrEnd = 1;
if (bVAlloc)
{
PFN TempPfn;
@ -916,19 +936,21 @@ VAddr VMManager::AllocateSystemMemory(PageType BusyType, DWORD Perms, size_t Siz
while (PointerPte <= EndingPte)
{
RemoveFree(1, &TempPfn, 0, LowestAcceptablePfn, HighestAcceptablePfn);
WritePte(PointerPte, PointerPte, TempPte, pfn);
WritePfn(TempPfn, TempPfn, PointerPte, BusyType);
PointerPte++;
}
}
else
{
if (bAddGuardPage) { InsertFree(pfn, pfn); pfn++; } // Free the commited guard page
EndingPfn = pfn + PagesNumber - 1;
WritePte(PointerPte, EndingPte, TempPte, pfn);
WritePfn(pfn, EndingPfn, PointerPte, BusyType);
}
EndingPte->Hardware.GuardOrEnd = 1;
UpdateMemoryPermissions(addr, PagesNumber << PAGE_SHIFT, Perms);
if (bAddGuardPage) { addr -= PAGE_SIZE; }
UpdateMemoryPermissions(bAddGuardPage ? addr + PAGE_SIZE : addr, PagesNumber << PAGE_SHIFT, Perms);
ConstructVMA(addr, PteNumber << PAGE_SHIFT, MemoryType, AllocatedVma, bVAlloc);
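
For stack allocations with a guard page the function now accounts for one extra pte (PteNumber = PagesNumber + 1), leaves the lowest page's pte zeroed, and applies the requested permissions only to the committed pages above it. A sketch of the resulting layout, with hypothetical numbers:

#include <cstdio>

constexpr unsigned PAGE_SIZE = 0x1000;

int main()
{
    unsigned PagesNumber = 4;               // committed stack pages
    unsigned PteNumber   = PagesNumber + 1; // + 1 pte for the guard page
    unsigned addr        = 0x00400000;      // hypothetical mapped base

    unsigned GuardPage = addr;              // pte left zeroed
    unsigned PermsBase = addr + PAGE_SIZE;  // UpdateMemoryPermissions target
    unsigned BlockEnd  = addr + PteNumber * PAGE_SIZE;

    printf("guard 0x%08X, protected 0x%08X - 0x%08X, vma spans %u ptes\n",
        GuardPage, PermsBase, BlockEnd - 1, PteNumber);
    return 0;
}
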
@ -1195,11 +1217,11 @@ PFN_COUNT VMManager::DeallocateSystemMemory(PageType BusyType, VAddr addr, size_
{
// ergo720: I'm not sure but MmDbgFreeMemory seems to be able to deallocate only a part of an allocation done by
// MmDbgAllocateMemory. This is not a problem since CarveVMARange supports freeing only a part of an allocated
// vma, but we won't pass its size to CheckExistenceVMA because it would fail the size check
// vma, but we won't call CheckExistenceVMA because it can fail the size and/or the address check
assert(IS_DEVKIT_ADDRESS(addr));
MemoryType = DevkitRegion;
it = CheckExistenceVMA(addr, MemoryType);
it = GetVMAIterator(addr, MemoryType);
}
else
{
@ -1214,6 +1236,11 @@ PFN_COUNT VMManager::DeallocateSystemMemory(PageType BusyType, VAddr addr, size_
}
StartingPte = GetPteAddress(addr);
if (StartingPte->Hardware.Valid == 0) {
WritePte(StartingPte, StartingPte, *StartingPte, 0, true); // this is the guard page of the stack
StartingPte++;
}
if (BusyType == DebuggerType)
{
EndingPte = StartingPte + (ROUND_UP_4K(Size) >> PAGE_SHIFT) - 1;
@ -1469,9 +1496,6 @@ xboxkrnl::NTSTATUS VMManager::XbAllocateVirtualMemory(VAddr* addr, ULONG ZeroBit
// Limit the number of zero bits to 21 at most
if (ZeroBits > 21) { RETURN(STATUS_INVALID_PARAMETER); }
// No empty allocation allowed
if (CapturedSize == 0) { RETURN(STATUS_INVALID_PARAMETER); }
// Check for unknown MEM flags
if(AllocationType & ~(XBOX_MEM_COMMIT | XBOX_MEM_RESERVE | XBOX_MEM_TOP_DOWN | XBOX_MEM_RESET
| XBOX_MEM_NOZERO)) { RETURN(STATUS_INVALID_PARAMETER); }
@ -1490,18 +1514,37 @@ xboxkrnl::NTSTATUS VMManager::XbAllocateVirtualMemory(VAddr* addr, ULONG ZeroBit
// This can lead to unaligned blocks if the host granularity is not 64K. Fortunately, Windows uses exactly that granularity
VAddr AlignedCapturedBase;
size_t AlignedCapturedSize;
if (Protect & XBOX_MEM_RESERVE)
VMAIter it;
Lock();
if (AllocationType & XBOX_MEM_RESERVE || CapturedBase == 0)
{
AlignedCapturedBase = ROUND_DOWN(CapturedBase, X64KB);
AlignedCapturedSize = ROUND_UP_4K(CapturedSize);
}
if (Protect & XBOX_MEM_COMMIT)
{
AlignedCapturedBase = ROUND_DOWN_4K(CapturedBase);
AlignedCapturedSize = (PAGES_SPANNED(CapturedSize, CapturedSize)) << PAGE_SHIFT;
}
Lock();
if (CapturedBase != 0)
{
// The specified region must be completely inside a free vma
it = CheckConflictingVMA(AlignedCapturedBase, AlignedCapturedSize);
if (it == m_MemoryRegionArray[UserRegion].RegionMap.end() || it->second.type != FreeVma) { status = STATUS_CONFLICTING_ADDRESSES; goto Exit; }
}
CapturedBase = AlignedCapturedBase;
}
if (AllocationType & XBOX_MEM_COMMIT)
{
if (CapturedBase != 0)
{
AlignedCapturedBase = ROUND_DOWN_4K(CapturedBase);
AlignedCapturedSize = (PAGES_SPANNED(CapturedBase, CapturedSize)) << PAGE_SHIFT;
it = CheckConflictingVMA(AlignedCapturedBase, AlignedCapturedSize);
// The specified region must be completely inside a reserved vma
if (it == m_MemoryRegionArray[UserRegion].RegionMap.end() || it->second.type != ReservedVma) { status = STATUS_CONFLICTING_ADDRESSES; goto Exit; }
}
else { AlignedCapturedSize = ROUND_UP_4K(CapturedSize); }
}
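
The two branches round the caller's base and size differently; a sketch of the arithmetic, with ROUND_DOWN, ROUND_UP_4K and PAGES_SPANNED written out under their usual definitions (PAGES_SPANNED counts every page the byte range touches):

#include <cstdio>

constexpr unsigned X64KB     = 0x10000;
constexpr unsigned PAGE_SIZE = 0x1000;

static unsigned RoundDown(unsigned x, unsigned g) { return x & ~(g - 1); }
static unsigned RoundUp4K(unsigned x) { return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }
static unsigned PagesSpanned(unsigned base, unsigned size)
{
    return ((base & (PAGE_SIZE - 1)) + size + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main()
{
    unsigned Base = 0x00041234, Size = 0x2100;

    // XBOX_MEM_RESERVE (or no base): base snaps down to 64K, size rounds up to 4K
    printf("reserve: base 0x%08X, size 0x%05X\n", RoundDown(Base, X64KB), RoundUp4K(Size));

    // XBOX_MEM_COMMIT with an explicit base: base snaps down to 4K and the size
    // covers every page the original range touched
    printf("commit:  base 0x%08X, size 0x%05X\n",
        RoundDown(Base, PAGE_SIZE), PagesSpanned(Base, Size) * PAGE_SIZE);
    return 0;
}
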
if (AllocationType != XBOX_MEM_RESET)
{
@ -1509,7 +1552,7 @@ xboxkrnl::NTSTATUS VMManager::XbAllocateVirtualMemory(VAddr* addr, ULONG ZeroBit
DWORD WindowsAllocType = ConvertXboxToWinAllocType(AllocationType);
// Only allocate memory if the base address is higher than our memory placeholder
if (AlignedCapturedBase > XBE_MAX_VA)
if (AlignedCapturedBase > XBE_MAX_VA || AlignedCapturedBase == 0)
{
status = NtDll::NtAllocateVirtualMemory(
/*ProcessHandle=*/g_CurrentProcessHandle,
@ -1527,8 +1570,8 @@ xboxkrnl::NTSTATUS VMManager::XbAllocateVirtualMemory(VAddr* addr, ULONG ZeroBit
*addr = AlignedCapturedBase;
*Size = AlignedCapturedSize;
Unlock();
RETURN(STATUS_SUCCESS);
status = STATUS_SUCCESS;
goto Exit;
}
if (NT_SUCCESS(status))
@ -1536,7 +1579,7 @@ xboxkrnl::NTSTATUS VMManager::XbAllocateVirtualMemory(VAddr* addr, ULONG ZeroBit
DbgPrintf("VMEM: XbAllocateVirtualMemory resulting range : 0x%.8X - 0x%.8X\n", AlignedCapturedBase,
AlignedCapturedBase + AlignedCapturedSize);
if (Protect & XBOX_MEM_RESERVE)
if (AllocationType & XBOX_MEM_RESERVE || (AllocationType & XBOX_MEM_COMMIT && CapturedBase == 0))
{
m_VirtualMemoryBytesReserved += AlignedCapturedSize;
ConstructVMA(AlignedCapturedBase, AlignedCapturedSize, UserRegion, ReservedVma, true, Protect);
@ -1560,8 +1603,9 @@ xboxkrnl::NTSTATUS VMManager::XbAllocateVirtualMemory(VAddr* addr, ULONG ZeroBit
*addr = AlignedCapturedBase;
*Size = AlignedCapturedSize;
}
Unlock();
Exit:
Unlock();
RETURN(status);
}
@ -1603,6 +1647,12 @@ xboxkrnl::NTSTATUS VMManager::XbFreeVirtualMemory(VAddr* addr, size_t* Size, DWO
Lock();
VMAIter it = CheckConflictingVMA(AlignedCapturedBase, AlignedCapturedSize);
if (it == m_MemoryRegionArray[UserRegion].RegionMap.end() || it->second.type != ReservedVma) { status = STATUS_MEMORY_NOT_ALLOCATED; goto Exit; }
if (it->first + it->second.size - 1 < AlignedCapturedBase + AlignedCapturedSize - 1) { status = STATUS_UNABLE_TO_FREE_VM; goto Exit; }
// Don't free our memory placeholder
if (AlignedCapturedBase > XBE_MAX_VA)
{
@ -1632,6 +1682,7 @@ xboxkrnl::NTSTATUS VMManager::XbFreeVirtualMemory(VAddr* addr, size_t* Size, DWO
*Size = AlignedCapturedSize;
}
Exit:
Unlock();
RETURN(status);
}
@ -2001,19 +2052,19 @@ VMAIter VMManager::CheckExistenceVMA(VAddr addr, MemoryRegionType Type, size_t S
return m_MemoryRegionArray[Type].RegionMap.end();
}
}
#if 0
bool VMManager::CheckConflictingVMA(VAddr addr, size_t Size)
VMAIter VMManager::CheckConflictingVMA(VAddr addr, size_t Size)
{
VMAIter it = GetVMAIterator(addr, UserRegion);
if (it != m_MemoryRegionArray[UserRegion].RegionMap.end() && it->second.type == FreeVma)
if (it != m_MemoryRegionArray[UserRegion].RegionMap.end() && it->first <= addr && it->second.size >= Size)
{
if (it->second.size >= Size) { return false; } // no conflict
return it; // the requested range is completely inside this vma
}
return true; // conflict
return m_MemoryRegionArray[UserRegion].RegionMap.end(); // the range is not contained in a single vma
}
#endif
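
Despite its old name, the reworked CheckConflictingVMA is a containment test: it reports the user-region vma that fully covers the requested range, or end(). A sketch of that lookup over a std::map keyed by base address (the bounds check here uses the full end address, which is slightly stricter than the size-only comparison above):

#include <cstdio>
#include <map>

typedef unsigned int VAddr;
struct Vma { size_t size; };
typedef std::map<VAddr, Vma> VmaMap;

// Return the vma fully containing [addr, addr + Size), or end()
static VmaMap::iterator FindContainingVma(VmaMap& Map, VAddr addr, size_t Size)
{
    VmaMap::iterator it = Map.upper_bound(addr); // first vma starting above addr
    if (it == Map.begin()) { return Map.end(); }
    --it;                                        // now it->first <= addr
    if (addr + Size <= it->first + it->second.size) { return it; }
    return Map.end();
}

int main()
{
    VmaMap Map;
    Map[0x10000] = { 0x20000 }; // vma covering 0x10000 - 0x2FFFF

    printf("inside:    %d\n", FindContainingVma(Map, 0x11000, 0x1000) != Map.end()); // 1
    printf("straddles: %d\n", FindContainingVma(Map, 0x2F000, 0x2000) != Map.end()); // 0
    return 0;
}
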
void VMManager::DestructVMA(VMAIter it, MemoryRegionType Type, bool bSkipDestruction)
{
BOOL ret;


@ -211,12 +211,14 @@ class VMManager : public PhysicalMemory
VMAIter SplitVMA(VMAIter vma_handle, u32 offset_in_vma, MemoryRegionType Type);
// merges the specified vma with adjacent ones if possible
VMAIter MergeAdjacentVMA(VMAIter vma_handle, MemoryRegionType Type);
// checks if the specified vma overlaps with another
//bool CheckConflictingVMA(VAddr addr, size_t Size);
// checks if the specified range is completely inside a vma
VMAIter CheckConflictingVMA(VAddr addr, size_t Size);
// changes the access permissions of a block of memory
void UpdateMemoryPermissions(VAddr addr, size_t Size, DWORD Perms);
// restores persistent memory
void RestorePersistentMemory();
// restores a persistent allocation
void RestorePersistentAllocation(VAddr addr, PFN StartingPfn, PFN EndingPfn, PageType Type);
// checks if any of the EXECUTE flags are set
bool HasPageExecutionFlag(DWORD protect);
// acquires the critical section