diff --git a/src/xenia/cpu/thread_state.cc b/src/xenia/cpu/thread_state.cc
index 54d795f24..73af5b2d6 100644
--- a/src/xenia/cpu/thread_state.cc
+++ b/src/xenia/cpu/thread_state.cc
@@ -46,9 +46,11 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
   backend_data_ = processor->backend()->AllocThreadData();
 
   if (!stack_address) {
+    // We must always allocate a guard region before stacks, as we can
+    // only Protect() on system page granularity.
     stack_size = (stack_size + 0xFFF) & 0xFFFFF000;
     uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
-    uint32_t stack_padding = stack_alignment * 1;
+    uint32_t stack_padding = xe::page_size();  // Host page size.
     uint32_t actual_stack_size = stack_padding + stack_size;
     bool top_down;
     switch (stack_type) {
diff --git a/src/xenia/memory.cc b/src/xenia/memory.cc
index e78edfd98..9c45bb11a 100644
--- a/src/xenia/memory.cc
+++ b/src/xenia/memory.cc
@@ -852,12 +852,19 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
   }
 
   // Attempt host change (hopefully won't fail).
-  DWORD new_protect = ToWin32ProtectFlags(protect);
-  DWORD old_protect;
-  if (!VirtualProtect(membase_ + heap_base_ + start_page_number * page_size_,
-                      page_count * page_size_, new_protect, &old_protect)) {
-    XELOGE("BaseHeap::Protect failed due to host VirtualProtect failure");
-    return false;
+  // We can only do this if our size matches system page granularity.
+  if (page_size_ == xe::page_size() ||
+      (((page_count * page_size_) % xe::page_size() == 0) &&
+       ((start_page_number * page_size_) % xe::page_size() == 0))) {
+    DWORD new_protect = ToWin32ProtectFlags(protect);
+    DWORD old_protect;
+    if (!VirtualProtect(membase_ + heap_base_ + start_page_number * page_size_,
+                        page_count * page_size_, new_protect, &old_protect)) {
+      XELOGE("BaseHeap::Protect failed due to host VirtualProtect failure");
+      return false;
+    }
+  } else {
+    XELOGW("BaseHeap::Protect: ignoring request as it is not host page aligned");
   }
 
   // Perform table change.
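Notes (not part of the patch):

The thread_state.cc hunk leaves the existing size rounding in place: the requested stack size is rounded up to a 4 KiB multiple, the alignment is derived from it, and only the padding in front of the stack changes to the host page size. A minimal standalone sketch of that arithmetic, with an illustrative helper name (round_up_4k) and sample sizes that are not from the tree:

    #include <cassert>
    #include <cstdint>

    // Mirrors (stack_size + 0xFFF) & 0xFFFFF000 from the hunk above:
    // round a requested stack size up to the next 4 KiB multiple.
    uint32_t round_up_4k(uint32_t stack_size) {
      return (stack_size + 0xFFF) & 0xFFFFF000;
    }

    int main() {
      assert(round_up_4k(0x1000) == 0x1000);  // already 4 KiB aligned
      assert(round_up_4k(0x1001) == 0x2000);  // rounds up to the next 4 KiB

      // Alignment selection from the hunk: a size that is not a 64 KiB
      // multiple has bits set in 0xF000 and gets 4 KiB alignment; a
      // 64 KiB multiple gets 64 KiB alignment.
      uint32_t stack_size = round_up_4k(0x1234);  // -> 0x2000
      uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
      assert(stack_alignment == 0x1000);

      stack_size = round_up_4k(0x10000);  // -> 0x10000
      stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
      assert(stack_alignment == 0x10000);
      return 0;
    }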
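The memory.cc hunk forwards a protection change to the host only when the touched guest-page run starts and ends on host page boundaries, since VirtualProtect() cannot operate on anything smaller than a host page. A sketch of that predicate, assuming hypothetical names (CanProtectOnHost, an explicit host_page_size parameter) in place of the heap's member state and xe::page_size():

    #include <cstdint>
    #include <cstdio>

    // A run of guest pages can be handed to VirtualProtect()/mprotect()
    // only if both its byte offset and its byte length are multiples of
    // the host page size.
    bool CanProtectOnHost(uint32_t guest_page_size, uint32_t host_page_size,
                          uint32_t start_page, uint32_t page_count) {
      if (guest_page_size == host_page_size) return true;
      return (page_count * guest_page_size) % host_page_size == 0 &&
             (start_page * guest_page_size) % host_page_size == 0;
    }

    int main() {
      // 64 KiB guest pages on a 4 KiB host: every run is protectable.
      printf("%d\n", CanProtectOnHost(0x10000, 0x1000, 3, 1));  // 1
      // 4 KiB guest pages on a 16 KiB host: a lone page is not...
      printf("%d\n", CanProtectOnHost(0x1000, 0x4000, 5, 1));   // 0
      // ...but a 16 KiB-aligned run of four of them is.
      printf("%d\n", CanProtectOnHost(0x1000, 0x4000, 4, 4));   // 1
      return 0;
    }

When the predicate fails, the patch skips the host call (logging a warning) but still falls through to the guest page-table update, so the guest-visible protection state stays consistent even when the host cannot enforce it at that granularity.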