Fixing page guards to work at system page granularity.

Ben Vanik 2015-05-18 20:25:15 -07:00
parent bb5466d7b2
commit 7796f5cb60
2 changed files with 16 additions and 7 deletions


@@ -46,9 +46,11 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
   backend_data_ = processor->backend()->AllocThreadData();
   if (!stack_address) {
+    // We must always allocate 64K as a guard region before stacks, as we can
+    // only Protect() on system page granularity.
     stack_size = (stack_size + 0xFFF) & 0xFFFFF000;
-    uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
-    uint32_t stack_padding = stack_alignment * 1;
+    uint32_t stack_padding = xe::page_size();  // Host page size.
     uint32_t actual_stack_size = stack_padding + stack_size;
     bool top_down;
     switch (stack_type) {
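
Note: the hunk above replaces the old alignment heuristic with a single host-page-sized pad in front of the stack. A minimal sketch of the resulting size computation, assuming xe::page_size() reports the host page size (hypothetical host_page_size() stands in for it here; on Windows the real value comes from GetSystemInfo() and is typically 0x1000):

#include <cstdint>

// Stand-in for xe::page_size(); assumed 4 KiB for this sketch.
static uint32_t host_page_size() { return 0x1000; }

// Round the requested stack size up to 4 KiB, then prepend one host page to
// serve as the guard region that Protect() can later mark inaccessible.
static uint32_t ComputeActualStackSize(uint32_t stack_size) {
  stack_size = (stack_size + 0xFFF) & 0xFFFFF000;  // 4 KiB round-up.
  uint32_t stack_padding = host_page_size();       // Guard region size.
  return stack_padding + stack_size;
}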


@@ -852,6 +852,10 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
   }
   // Attempt host change (hopefully won't fail).
+  // We can only do this if our size matches system page granularity.
+  if (page_size_ == xe::page_size() ||
+      ((page_count * page_size_) % xe::page_size() == 0) &&
+          ((start_page_number * page_size_) % xe::page_size() == 0)) {
   DWORD new_protect = ToWin32ProtectFlags(protect);
   DWORD old_protect;
   if (!VirtualProtect(membase_ + heap_base_ + start_page_number * page_size_,
@@ -859,6 +863,9 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
     XELOGE("BaseHeap::Protect failed due to host VirtualProtect failure");
     return false;
   }
+  } else {
+    XELOGW("BaseHeap::Protect: ignoring request as not 64k page aligned");
+  }
   // Perform table change.
   for (uint32_t page_number = start_page_number; page_number <= end_page_number;
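
Note: the new guard only forwards the request to VirtualProtect when the guest range maps onto whole host pages; otherwise it logs a warning and falls through to the page-table update alone. A sketch of that predicate in isolation, with hypothetical parameter names (page_size mirrors the heap's page_size_ member, host_page_size stands in for xe::page_size()):

#include <cstdint>

// True when a guest page range can be protected at host granularity: either
// guest and host pages are the same size, or both the byte length of the
// range and its starting byte offset are exact multiples of a host page.
static bool CanHostProtect(uint32_t page_size, uint32_t start_page_number,
                           uint32_t page_count, uint32_t host_page_size) {
  return page_size == host_page_size ||
         (((page_count * page_size) % host_page_size == 0) &&
          ((start_page_number * page_size) % host_page_size == 0));
}

The committed condition relies on && binding tighter than ||, which GCC and Clang flag under -Wparentheses; the sketch parenthesizes the conjunction explicitly to make the intended grouping visible.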