Abstraction for VirtualAlloc/VirtualFree.
This commit is contained in:
parent 58c3a1ba79
commit 6cf29b969d
@@ -25,10 +25,34 @@ size_t page_size();
 
 enum class PageAccess {
   kNoAccess = 0,
-  kReadOnly,
-  kReadWrite,
+  kReadOnly = 1 << 0,
+  kReadWrite = kReadOnly | 1 << 1,
+  kExecuteReadWrite = kReadWrite | 1 << 2,
 };
 
+enum class AllocationType {
+  kReserve = 1 << 0,
+  kCommit = 1 << 1,
+  kReserveCommit = kReserve | kCommit,
+};
+
+enum class DeallocationType {
+  kRelease = 1 << 0,
+  kDecommit = 1 << 1,
+  kDecommitRelease = kRelease | kDecommit,
+};
+
+// Allocates a block of memory at the given page-aligned base address.
+// Fails if the memory is not available.
+void* AllocFixed(void* base_address, size_t length,
+                 AllocationType allocation_type, PageAccess access);
+
+// Deallocates and/or releases the given block of memory.
+// When releasing memory length must be zero, as all pages in the region are
+// released.
+bool DeallocFixed(void* base_address, size_t length,
+                  DeallocationType deallocation_type);
+
 // Sets the access rights for the given block of memory and returns the previous
 // access rights. Both base_address and length will be adjusted to page_size().
 bool Protect(void* base_address, size_t length, PageAccess access,
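A minimal usage sketch of the API declared above. This is a hypothetical caller, not part of this commit, and it assumes the header path "xenia/base/memory.h" and that page_size() lives in the same xe::memory namespace as the new declarations:

#include <cstdint>

#include "xenia/base/memory.h"

void ExampleUsage() {
  const size_t length = xe::memory::page_size();
  // Reserve and commit one read-write page in a single call.
  void* p = xe::memory::AllocFixed(nullptr, length,
                                   xe::memory::AllocationType::kReserveCommit,
                                   xe::memory::PageAccess::kReadWrite);
  if (!p) {
    return;
  }
  *static_cast<uint32_t*>(p) = 42;
  // Drop write access; the previous rights come back via the out parameter.
  xe::memory::PageAccess old_access;
  xe::memory::Protect(p, length, xe::memory::PageAccess::kReadOnly,
                      &old_access);
  // Per the comment on DeallocFixed, length must be zero when releasing.
  xe::memory::DeallocFixed(p, 0, xe::memory::DeallocationType::kRelease);
}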
@@ -24,26 +24,69 @@ size_t page_size() {
   return value;
 }
 
+DWORD ToWin32ProtectFlags(PageAccess access) {
+  switch (access) {
+    case PageAccess::kNoAccess:
+      return PAGE_NOACCESS;
+    case PageAccess::kReadOnly:
+      return PAGE_READONLY;
+    case PageAccess::kReadWrite:
+      return PAGE_READWRITE;
+    case PageAccess::kExecuteReadWrite:
+      return PAGE_EXECUTE_READWRITE;
+    default:
+      assert_unhandled_case(access);
+      return PAGE_NOACCESS;
+  }
+}
+
+void* AllocFixed(void* base_address, size_t length,
+                 AllocationType allocation_type, PageAccess access) {
+  DWORD alloc_type = 0;
+  switch (allocation_type) {
+    case AllocationType::kReserve:
+      alloc_type = MEM_RESERVE;
+      break;
+    case AllocationType::kCommit:
+      alloc_type = MEM_COMMIT;
+      break;
+    case AllocationType::kReserveCommit:
+      alloc_type = MEM_RESERVE | MEM_COMMIT;
+      break;
+    default:
+      assert_unhandled_case(allocation_type);
+      break;
+  }
+  DWORD protect = ToWin32ProtectFlags(access);
+  return VirtualAlloc(base_address, length, alloc_type, protect);
+}
+
+bool DeallocFixed(void* base_address, size_t length,
+                  DeallocationType deallocation_type) {
+  DWORD free_type = 0;
+  switch (deallocation_type) {
+    case DeallocationType::kRelease:
+      free_type = MEM_RELEASE;
+      break;
+    case DeallocationType::kDecommit:
+      free_type = MEM_DECOMMIT;
+      break;
+    case DeallocationType::kDecommitRelease:
+      free_type = MEM_RELEASE | MEM_DECOMMIT;
+      break;
+    default:
+      assert_unhandled_case(deallocation_type);
+      break;
+  }
+  return VirtualFree(base_address, length, free_type) ? true : false;
+}
+
 bool Protect(void* base_address, size_t length, PageAccess access,
              PageAccess* out_old_access) {
   if (out_old_access) {
     *out_old_access = PageAccess::kNoAccess;
   }
-  DWORD new_protect;
-  switch (access) {
-    case PageAccess::kNoAccess:
-      new_protect = PAGE_NOACCESS;
-      break;
-    case PageAccess::kReadOnly:
-      new_protect = PAGE_READONLY;
-      break;
-    case PageAccess::kReadWrite:
-      new_protect = PAGE_READWRITE;
-      break;
-    default:
-      assert_unhandled_case(access);
-      break;
-  }
+  DWORD new_protect = ToWin32ProtectFlags(access);
   DWORD old_protect = 0;
   BOOL result = VirtualProtect(base_address, length, new_protect, &old_protect);
   if (result) {
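One Win32 detail worth knowing when reading DeallocFixed() above: VirtualFree() takes exactly one free type, and with MEM_RELEASE the size argument must be zero, so the MEM_RELEASE | MEM_DECOMMIT combination produced for kDecommitRelease is not a documented combination and will fail. A hedged sketch of how that case could be expressed instead (hypothetical helper, not in the commit):

bool DecommitThenRelease(void* base_address, size_t length) {
  // MEM_DECOMMIT returns the pages to the reserved state; MEM_RELEASE then
  // frees the whole reservation (dwSize must be 0 with MEM_RELEASE).
  return VirtualFree(base_address, length, MEM_DECOMMIT) != FALSE &&
         VirtualFree(base_address, 0, MEM_RELEASE) != FALSE;
}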
@@ -58,6 +101,8 @@ bool Protect(void* base_address, size_t length, PageAccess access,
       case PAGE_READWRITE:
         *out_old_access = PageAccess::kReadWrite;
         break;
+      case PAGE_EXECUTE_READWRITE:
+        *out_old_access = PageAccess::kExecuteReadWrite;
       default:
         assert_unhandled_case(access);
         break;
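Note that the two added lines above carry no break;, so control falls through into default: and assert_unhandled_case(access) fires even though PAGE_EXECUTE_READWRITE was just handled. The presumably intended form:

      case PAGE_EXECUTE_READWRITE:
        *out_old_access = PageAccess::kExecuteReadWrite;
        break;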
@@ -43,7 +43,8 @@ X64CodeCache::X64CodeCache()
 
 X64CodeCache::~X64CodeCache() {
   if (indirection_table_base_) {
-    VirtualFree(indirection_table_base_, 0, MEM_RELEASE);
+    xe::memory::DeallocFixed(indirection_table_base_, 0,
+                             xe::memory::DeallocationType::kRelease);
   }
 
 #ifdef USE_GROWABLE_FUNCTION_TABLE
@@ -66,9 +67,10 @@ X64CodeCache::~X64CodeCache() {
 }
 
 bool X64CodeCache::Initialize() {
-  indirection_table_base_ = reinterpret_cast<uint8_t*>(
-      VirtualAlloc(reinterpret_cast<void*>(kIndirectionTableBase),
-                   kIndirectionTableSize, MEM_RESERVE, PAGE_READWRITE));
+  indirection_table_base_ = reinterpret_cast<uint8_t*>(xe::memory::AllocFixed(
+      reinterpret_cast<void*>(kIndirectionTableBase), kIndirectionTableSize,
+      xe::memory::AllocationType::kReserve,
+      xe::memory::PageAccess::kReadWrite));
   if (!indirection_table_base_) {
-    XELOGE("Unable to allocate code cache indirection table");
+    XELOGE(
@@ -91,7 +93,7 @@ bool X64CodeCache::Initialize() {
     return false;
   }
 
-  // Mapp generated code region into the file. Pages are committed as required.
+  // Map generated code region into the file. Pages are committed as required.
   generated_code_base_ = reinterpret_cast<uint8_t*>(MapViewOfFileEx(
       mapping_, FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE, 0, 0,
       kGeneratedCodeSize, reinterpret_cast<void*>(kGeneratedCodeBase)));
@@ -153,8 +155,10 @@ void X64CodeCache::AddIndirection(uint32_t guest_address,
 void X64CodeCache::CommitExecutableRange(uint32_t guest_low,
                                          uint32_t guest_high) {
   // Commit the memory.
-  VirtualAlloc(indirection_table_base_ + (guest_low - kIndirectionTableBase),
-               guest_high - guest_low, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+  xe::memory::AllocFixed(
+      indirection_table_base_ + (guest_low - kIndirectionTableBase),
+      guest_high - guest_low, xe::memory::AllocationType::kCommit,
+      xe::memory::PageAccess::kExecuteReadWrite);
 
   // Fill memory with the default value.
   uint32_t* p = reinterpret_cast<uint32_t*>(indirection_table_base_);
@@ -198,8 +202,9 @@ void* X64CodeCache::PlaceCode(uint32_t guest_address, void* machine_code,
   size_t old_commit_mark = generated_code_commit_mark_;
   if (high_mark > old_commit_mark) {
     size_t new_commit_mark = old_commit_mark + 16 * 1024 * 1024;
-    VirtualAlloc(generated_code_base_, new_commit_mark, MEM_COMMIT,
-                 PAGE_EXECUTE_READWRITE);
+    xe::memory::AllocFixed(generated_code_base_, new_commit_mark,
+                           xe::memory::AllocationType::kCommit,
+                           xe::memory::PageAccess::kExecuteReadWrite);
     generated_code_commit_mark_.compare_exchange_strong(old_commit_mark,
                                                         new_commit_mark);
   }
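The pattern above (and again in PlaceData below) is a commit watermark: racing threads may each over-commit in 16 MiB steps, which is harmless because committing an already-committed page succeeds without touching its contents, and compare_exchange_strong only advances the shared mark if no other thread advanced it first. A standalone sketch with illustrative names (not from the commit):

#include <atomic>
#include <cstdint>

#include "xenia/base/memory.h"

std::atomic<size_t> commit_mark{0};

void EnsureCommitted(uint8_t* base, size_t high_mark) {
  size_t old_mark = commit_mark;
  if (high_mark > old_mark) {
    size_t new_mark = old_mark + 16 * 1024 * 1024;
    // Safe to race: re-committing committed pages is a no-op.
    xe::memory::AllocFixed(base, new_mark,
                           xe::memory::AllocationType::kCommit,
                           xe::memory::PageAccess::kExecuteReadWrite);
    commit_mark.compare_exchange_strong(old_mark, new_mark);
  }
}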
@@ -394,8 +399,9 @@ uint32_t X64CodeCache::PlaceData(const void* data, size_t length) {
   size_t old_commit_mark = generated_code_commit_mark_;
   if (high_mark > old_commit_mark) {
     size_t new_commit_mark = old_commit_mark + 16 * 1024 * 1024;
-    VirtualAlloc(generated_code_base_, new_commit_mark, MEM_COMMIT,
-                 PAGE_EXECUTE_READWRITE);
+    xe::memory::AllocFixed(generated_code_base_, new_commit_mark,
+                           xe::memory::AllocationType::kCommit,
+                           xe::memory::PageAccess::kExecuteReadWrite);
     generated_code_commit_mark_.compare_exchange_strong(old_commit_mark,
                                                         new_commit_mark);
   }
@@ -357,9 +357,9 @@ bool Memory::AddVirtualMappedRange(uint32_t virtual_address, uint32_t mask,
                                    uint32_t size, void* context,
                                    cpu::MMIOReadCallback read_callback,
                                    cpu::MMIOWriteCallback write_callback) {
-  DWORD protect = PAGE_NOACCESS;
-  if (!VirtualAlloc(TranslateVirtual(virtual_address), size, MEM_COMMIT,
-                    protect)) {
+  if (!xe::memory::AllocFixed(TranslateVirtual(virtual_address), size,
+                              xe::memory::AllocationType::kCommit,
+                              xe::memory::PageAccess::kNoAccess)) {
     XELOGE("Unable to map range; commit/protect failed");
     return false;
   }
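The kNoAccess protection here is deliberate rather than a placeholder: committing the MMIO range with no access rights makes every guest load or store to it fault, and (judging by the callback parameters above; the handler itself is outside this diff) the fault is then resolved through read_callback/write_callback instead of real memory.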
@@ -448,41 +448,6 @@ xe::memory::PageAccess ToPageAccess(uint32_t protect) {
   }
 }
 
-DWORD ToWin32ProtectFlags(uint32_t protect) {
-  DWORD result = 0;
-  if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
-    result |= PAGE_READONLY;
-  } else if ((protect & kMemoryProtectRead) &&
-             (protect & kMemoryProtectWrite)) {
-    result |= PAGE_READWRITE;
-  } else {
-    result |= PAGE_NOACCESS;
-  }
-  // if (protect & kMemoryProtectNoCache) {
-  //   result |= PAGE_NOCACHE;
-  //}
-  // if (protect & kMemoryProtectWriteCombine) {
-  //   result |= PAGE_WRITECOMBINE;
-  //}
-  return result;
-}
-
-uint32_t FromWin32ProtectFlags(DWORD protect) {
-  uint32_t result = 0;
-  if (protect & PAGE_READONLY) {
-    result |= kMemoryProtectRead;
-  } else if (protect & PAGE_READWRITE) {
-    result |= kMemoryProtectRead | kMemoryProtectWrite;
-  }
-  if (protect & PAGE_NOCACHE) {
-    result |= kMemoryProtectNoCache;
-  }
-  if (protect & PAGE_WRITECOMBINE) {
-    result |= kMemoryProtectWriteCombine;
-  }
-  return result;
-}
-
 BaseHeap::BaseHeap()
     : membase_(nullptr), heap_base_(0), heap_size_(0), page_size_(0) {}
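The two Win32-specific converters removed above are superseded by the platform-neutral path: call sites now go through ToPageAccess() (whose closing lines appear as context at the top of this hunk) and let the new xe::memory layer produce the Win32 flags. A hypothetical reconstruction of ToPageAccess(), mirroring the removed ToWin32ProtectFlags() logic; its actual body sits outside this diff:

xe::memory::PageAccess ToPageAccess(uint32_t protect) {
  if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
    return xe::memory::PageAccess::kReadOnly;
  } else if ((protect & kMemoryProtectRead) &&
             (protect & kMemoryProtectWrite)) {
    return xe::memory::PageAccess::kReadWrite;
  } else {
    return xe::memory::PageAccess::kNoAccess;
  }
}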
@@ -503,8 +468,8 @@ void BaseHeap::Dispose() {
        ++page_number) {
     auto& page_entry = page_table_[page_number];
     if (page_entry.state) {
-      VirtualFree(membase_ + heap_base_ + page_number * page_size_, 0,
-                  MEM_RELEASE);
+      xe::memory::DeallocFixed(membase_ + heap_base_ + page_number * page_size_,
+                               0, xe::memory::DeallocationType::kRelease);
       page_number += page_entry.region_page_count;
     }
   }
@@ -615,14 +580,12 @@ bool BaseHeap::AllocFixed(uint32_t base_address, uint32_t size,
   if (allocation_type == kMemoryAllocationReserve) {
     // Reserve is not needed, as we are mapped already.
   } else {
-    DWORD flAllocationType = 0;
-    if (allocation_type & kMemoryAllocationCommit) {
-      flAllocationType |= MEM_COMMIT;
-    }
-    LPVOID result =
-        VirtualAlloc(membase_ + heap_base_ + start_page_number * page_size_,
-                     page_count * page_size_, flAllocationType,
-                     ToWin32ProtectFlags(protect));
+    auto alloc_type = (allocation_type & kMemoryAllocationCommit)
+                          ? xe::memory::AllocationType::kCommit
+                          : xe::memory::AllocationType::kReserve;
+    void* result = xe::memory::AllocFixed(
+        membase_ + heap_base_ + start_page_number * page_size_,
+        page_count * page_size_, alloc_type, ToPageAccess(protect));
     if (!result) {
       XELOGE("BaseHeap::AllocFixed failed to alloc range from host");
       return false;
@@ -755,20 +718,18 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
   if (allocation_type == kMemoryAllocationReserve) {
     // Reserve is not needed, as we are mapped already.
   } else {
-    DWORD flAllocationType = 0;
-    if (allocation_type & kMemoryAllocationCommit) {
-      flAllocationType |= MEM_COMMIT;
-    }
-    LPVOID result =
-        VirtualAlloc(membase_ + heap_base_ + start_page_number * page_size_,
-                     page_count * page_size_, flAllocationType,
-                     ToWin32ProtectFlags(protect));
+    auto alloc_type = (allocation_type & kMemoryAllocationCommit)
+                          ? xe::memory::AllocationType::kCommit
+                          : xe::memory::AllocationType::kReserve;
+    void* result = xe::memory::AllocFixed(
+        membase_ + heap_base_ + start_page_number * page_size_,
+        page_count * page_size_, alloc_type, ToPageAccess(protect));
     if (!result) {
       XELOGE("BaseHeap::Alloc failed to alloc range from host");
       return false;
     }
 
-    if (FLAGS_scribble_heap && protect & kMemoryProtectWrite) {
+    if (FLAGS_scribble_heap && (protect & kMemoryProtectWrite)) {
       std::memset(result, 0xCD, page_count * page_size_);
     }
   }
@@ -784,7 +745,7 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
     page_entry.state = kMemoryAllocationReserve | allocation_type;
   }
 
-  *out_address = heap_base_ + start_page_number* page_size_;
+  *out_address = heap_base_ + (start_page_number * page_size_);
   return true;
 }
@@ -831,7 +792,7 @@ bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
   }
 
   if (out_region_size) {
-    *out_region_size = base_page_entry.region_page_count* page_size_;
+    *out_region_size = (base_page_entry.region_page_count * page_size_);
   }
 
   // Release from host not needed as mapping reserves the range for us.
@@ -981,7 +942,7 @@ bool BaseHeap::QuerySize(uint32_t address, uint32_t* out_size) {
   }
   std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
   auto page_entry = page_table_[page_number];
-  *out_size = page_entry.region_page_count* page_size_;
+  *out_size = (page_entry.region_page_count * page_size_);
   return true;
 }