xboxkrnl: Initial (untested) implementation of NtProtectVirtualMemory

This commit is contained in:
Dr. Chat 2017-07-24 21:41:47 -05:00
parent a5bdeeb2da
commit aee5601c68
3 changed files with 84 additions and 7 deletions

View File

@ -164,6 +164,55 @@ dword_result_t NtAllocateVirtualMemory(lpdword_t base_addr_ptr,
DECLARE_XBOXKRNL_EXPORT(NtAllocateVirtualMemory,
ExportTag::kImplemented | ExportTag::kMemory);
dword_result_t NtProtectVirtualMemory(lpdword_t base_addr_ptr,
                                      lpdword_t region_size_ptr,
                                      dword_t protect_bits,
                                      lpdword_t old_protect,
                                      dword_t debug_memory) {
  // Changes the protection of a page-aligned guest memory range.
  // On success writes the page-rounded base/size back through
  // base_addr_ptr/region_size_ptr and (optionally) the previous protection
  // through old_protect.

  // Set to TRUE when this memory refers to devkit memory area.
  assert_true(debug_memory == 0);

  // Must provide both a base address and a size.
  if (!base_addr_ptr || !region_size_ptr) {
    return X_STATUS_INVALID_PARAMETER;
  }
  // Must request a (nonzero) size, same as NtAllocateVirtualMemory.
  if (!*region_size_ptr) {
    return X_STATUS_INVALID_PARAMETER;
  }

  // Don't allow games to set execute bits.
  if (protect_bits & (X_PAGE_EXECUTE | X_PAGE_EXECUTE_READ |
                      X_PAGE_EXECUTE_READWRITE | X_PAGE_EXECUTE_WRITECOPY)) {
    XELOGW("Game setting EXECUTE bit on protect");
    return X_STATUS_ACCESS_DENIED;
  }

  auto heap = kernel_memory()->LookupHeap(*base_addr_ptr);
  if (!heap) {
    // Address isn't inside any known heap - reject instead of crashing on a
    // null heap dereference below.
    return X_STATUS_INVALID_PARAMETER;
  }

  // Adjust the base downwards to the nearest page boundary, then extend the
  // size so the rounded range still covers [base, base + size) - rounding the
  // size alone would drop the tail when the base was unaligned.
  uint32_t adjusted_base =
      *base_addr_ptr - (*base_addr_ptr % heap->page_size());
  uint32_t adjusted_size =
      xe::round_up(*base_addr_ptr + *region_size_ptr, heap->page_size()) -
      adjusted_base;

  uint32_t protect = FromXdkProtectFlags(protect_bits);
  uint32_t tmp_old_protect = 0;

  // FIXME: I think it's valid for NtProtectVirtualMemory to span regions, but
  // as of now our implementation will fail in this case. Need to verify.
  if (!heap->Protect(adjusted_base, adjusted_size, protect, &tmp_old_protect)) {
    return X_STATUS_ACCESS_DENIED;
  }

  // Write back output variables.
  *base_addr_ptr = adjusted_base;
  *region_size_ptr = adjusted_size;
  if (old_protect) {
    *old_protect = tmp_old_protect;
  }

  return X_STATUS_SUCCESS;
}
DECLARE_XBOXKRNL_EXPORT(NtProtectVirtualMemory,
ExportTag::kImplemented | ExportTag::kMemory);
dword_result_t NtFreeVirtualMemory(lpdword_t base_addr_ptr,
lpdword_t region_size_ptr, dword_t free_type,
dword_t debug_memory) {

View File

@ -475,6 +475,23 @@ xe::memory::PageAccess ToPageAccess(uint32_t protect) {
}
}
uint32_t FromPageAccess(xe::memory::PageAccess protect) {
  // Translates a host page-access value back into guest kMemoryProtect* bits.
  if (protect == memory::PageAccess::kReadOnly) {
    return kMemoryProtectRead;
  }
  if (protect == memory::PageAccess::kReadWrite) {
    return kMemoryProtectRead | kMemoryProtectWrite;
  }
  if (protect == memory::PageAccess::kExecuteReadWrite) {
    // Guest memory cannot be executable - this should never happen :)
    assert_always();
    return kMemoryProtectRead | kMemoryProtectWrite;
  }
  // kNoAccess (and anything unrecognized) maps to no access at all.
  return kMemoryProtectNoAccess;
}
// Constructs an empty, unbound heap: no backing memory and zeroed
// base/size/page-size. Presumably a separate initialization step fills these
// in before the heap is used - TODO confirm against the rest of the class.
BaseHeap::BaseHeap()
    : membase_(nullptr), heap_base_(0), heap_size_(0), page_size_(0) {}
@ -982,7 +999,8 @@ bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
return true;
}
bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect,
uint32_t* old_protect) {
uint32_t page_count = xe::round_up(size, page_size_) / page_size_;
uint32_t start_page_number = (address - heap_base_) / page_size_;
uint32_t end_page_number = start_page_number + page_count - 1;
@ -1014,14 +1032,21 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
if (page_size_ == xe::memory::page_size() ||
(((page_count * page_size_) % xe::memory::page_size() == 0) &&
((start_page_number * page_size_) % xe::memory::page_size() == 0))) {
memory::PageAccess old_protect_access;
if (!xe::memory::Protect(
membase_ + heap_base_ + start_page_number * page_size_,
page_count * page_size_, ToPageAccess(protect), nullptr)) {
page_count * page_size_, ToPageAccess(protect),
&old_protect_access)) {
XELOGE("BaseHeap::Protect failed due to host VirtualProtect failure");
return false;
}
if (old_protect) {
*old_protect = FromPageAccess(old_protect_access);
}
} else {
XELOGW("BaseHeap::Protect: ignoring request as not 64k page aligned");
XELOGW("BaseHeap::Protect: ignoring request as not 4k page aligned");
return false;
}
// Perform table change.
@ -1293,12 +1318,13 @@ bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
return BaseHeap::Release(base_address, out_region_size);
}
bool PhysicalHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
bool PhysicalHeap::Protect(uint32_t address, uint32_t size, uint32_t protect,
uint32_t* old_protect) {
auto global_lock = global_critical_region_.Acquire();
uint32_t parent_address = GetPhysicalAddress(address);
cpu::MMIOHandler::global_handler()->InvalidateRange(parent_address, size);
if (!parent_heap_->Protect(parent_address, size, protect)) {
if (!parent_heap_->Protect(parent_address, size, protect, old_protect)) {
XELOGE("PhysicalHeap::Protect failed due to parent heap failure");
return false;
}

View File

@ -135,7 +135,8 @@ class BaseHeap {
virtual bool Release(uint32_t address, uint32_t* out_region_size = nullptr);
// Modifies the protection mode of pages within the given range.
virtual bool Protect(uint32_t address, uint32_t size, uint32_t protect);
virtual bool Protect(uint32_t address, uint32_t size, uint32_t protect,
uint32_t* old_protect = nullptr);
// Queries information about the given region of pages.
bool QueryRegionInfo(uint32_t base_address, HeapAllocationInfo* out_info);
@ -208,7 +209,8 @@ class PhysicalHeap : public BaseHeap {
bool Decommit(uint32_t address, uint32_t size) override;
bool Release(uint32_t base_address,
uint32_t* out_region_size = nullptr) override;
bool Protect(uint32_t address, uint32_t size, uint32_t protect) override;
bool Protect(uint32_t address, uint32_t size, uint32_t protect,
uint32_t* old_protect = nullptr) override;
protected:
VirtualHeap* parent_heap_;