Optimizing page allocation (still needs work) and adding MmSetAddressProtect.

Ben Vanik 2015-06-03 21:24:09 -07:00
parent 50813d3ffa
commit a9e2ba16fb
3 changed files with 16 additions and 0 deletions

@@ -41,6 +41,7 @@ struct ExportTag {
   static const type kModules = 1 << 15;
   static const type kUserProfiles = 1 << 16;
   static const type kNetworking = 1 << 17;
+  static const type kMemory = 1 << 18;
   // Export will be logged on each call.
   static const type kLog = 1 << 30;
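
Since the tags are single bits, an export that needs both a category and per-call logging can presumably combine them with bitwise OR, as the `1 << n` values suggest. A minimal sketch reusing the export added below in this commit (the combination itself is an assumption, not something this diff shows):

    // Hypothetical: tag the new export for per-call logging as well.
    DECLARE_XBOXKRNL_EXPORT(MmSetAddressProtect,
                            ExportTag::kMemory | ExportTag::kLog);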

@@ -357,6 +357,14 @@ SHIM_CALL MmQueryAddressProtect_shim(PPCContext* ppc_context,
   SHIM_SET_RETURN_32(access);
 }
 
+void MmSetAddressProtect(lpvoid_t base_address, dword_t region_size,
+                         dword_t protect_bits) {
+  uint32_t protect = FromXdkProtectFlags(protect_bits);
+  auto heap = kernel_memory()->LookupHeap(base_address);
+  heap->Protect(base_address.guest_address(), region_size, protect);
+}
+DECLARE_XBOXKRNL_EXPORT(MmSetAddressProtect, ExportTag::kMemory);
+
 SHIM_CALL MmQueryAllocationSize_shim(PPCContext* ppc_context,
                                      KernelState* kernel_state) {
   uint32_t base_address = SHIM_GET_ARG_32(0);
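
The new export translates the guest's XDK protection bits into the emulator's internal protect flags, then applies them through whichever heap owns the address. FromXdkProtectFlags is defined elsewhere in this file and not shown in the diff; a minimal sketch of its likely shape, where the constant names and values (mirroring Win32 PAGE_*) are assumptions rather than the actual implementation:

    // Sketch only: the real FromXdkProtectFlags lives earlier in this
    // file; constants below are assumed, not copied from xenia.
    #include <cstdint>
    constexpr uint32_t X_PAGE_READONLY = 0x02;
    constexpr uint32_t X_PAGE_READWRITE = 0x04;
    constexpr uint32_t kMemoryProtectRead = 1 << 0;
    constexpr uint32_t kMemoryProtectWrite = 1 << 1;
    uint32_t FromXdkProtectFlags(uint32_t protect_bits) {
      uint32_t result = 0;
      if (protect_bits & X_PAGE_READONLY) {
        result |= kMemoryProtectRead;
      }
      if (protect_bits & X_PAGE_READWRITE) {
        result |= kMemoryProtectRead | kMemoryProtectWrite;
      }
      // Cache attributes (write-combine, no-cache) would map to their
      // own flags in the real helper.
      return result;
    }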

@@ -679,7 +679,11 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
       bool is_free = page_table_[page_number].state == 0;
       if (!is_free) {
         // At least one page in the range is used, skip to next.
+        // We know we'll be starting at least before this page.
+        any_taken = true;
         base_page_number = page_number - page_count;
+        base_page_number -= base_page_number % page_scan_stride;
+        base_page_number += page_scan_stride;  // cancel out loop logic
         break;
       }
     }
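
Worked example of the top-down skip: on hitting a used page, the scan moves the candidate base below that page, aligns it down to the scan stride, and pre-adds one stride so the loop's own decrement cancels it. A standalone sketch with assumed numbers (stride 16, 4-page allocation, used page 37; plain C++, not the xenia types):

    #include <cassert>
    #include <cstdint>
    int main() {
      const uint32_t page_scan_stride = 16;
      const uint32_t page_count = 4;
      uint32_t page_number = 37;  // first in-use page found in the range
      uint32_t base_page_number = page_number - page_count;     // 33
      base_page_number -= base_page_number % page_scan_stride;  // 32
      base_page_number += page_scan_stride;  // 48: cancels loop decrement
      base_page_number -= page_scan_stride;  // the for-loop's next step: 32
      assert(base_page_number == 32);  // next candidate sits below page 37
      return 0;
    }
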
@@ -708,7 +712,10 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
       bool is_free = page_table_[page_number].state == 0;
       if (!is_free) {
         // At least one page in the range is used, skip to next.
+        // We know we'll be starting at least after this page.
+        any_taken = true;
         base_page_number = xe::round_up(page_number + 1, page_scan_stride);
+        base_page_number -= page_scan_stride;  // cancel out loop logic
         break;
       }
     }
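
The bottom-up scan mirrors the same trick: round the base up past the used page, then pre-subtract one stride so the loop's increment cancels it. A sketch with the same assumed numbers, where round_up stands in for xe::round_up from xenia's base library:

    #include <cassert>
    #include <cstdint>
    uint32_t round_up(uint32_t value, uint32_t multiple) {
      return (value + multiple - 1) / multiple * multiple;
    }
    int main() {
      const uint32_t page_scan_stride = 16;
      uint32_t page_number = 37;  // first in-use page found in the range
      uint32_t base_page_number = round_up(page_number + 1, page_scan_stride);
      base_page_number -= page_scan_stride;  // 32: cancels loop increment
      base_page_number += page_scan_stride;  // the for-loop's next step: 48
      assert(base_page_number == 48);  // next candidate starts above page 37
      return 0;
    }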