VirtualProtect platform abstraction.

Ben Vanik 2015-07-15 18:20:05 -07:00
parent 99104a25a8
commit 58c3a1ba79
11 changed files with 155 additions and 56 deletions

View File

@@ -24,7 +24,8 @@
     <ClCompile Include="src\xenia\base\logging.cc" />
     <ClCompile Include="src\xenia\base\mapped_memory_win.cc" />
     <ClCompile Include="src\xenia\base\math.cc" />
-    <ClCompile Include="src\xenia\base\memory_generic.cc" />
+    <ClCompile Include="src\xenia\base\memory.cc" />
+    <ClCompile Include="src\xenia\base\memory_win.cc" />
     <ClCompile Include="src\xenia\base\platform_win.cc" />
     <ClCompile Include="src\xenia\base\ring_buffer.cc" />
     <ClCompile Include="src\xenia\base\string.cc" />

View File

@@ -36,9 +36,6 @@
     <ClCompile Include="src\xenia\base\math.cc">
       <Filter>src\xenia\base</Filter>
     </ClCompile>
-    <ClCompile Include="src\xenia\base\memory_generic.cc">
-      <Filter>src\xenia\base</Filter>
-    </ClCompile>
     <ClCompile Include="src\xenia\base\platform_win.cc">
       <Filter>src\xenia\base</Filter>
     </ClCompile>

@@ -57,6 +54,12 @@
     <ClCompile Include="src\xenia\base\clock_win.cc">
       <Filter>src\xenia\base</Filter>
     </ClCompile>
+    <ClCompile Include="src\xenia\base\memory.cc">
+      <Filter>src\xenia\base</Filter>
+    </ClCompile>
+    <ClCompile Include="src\xenia\base\memory_win.cc">
+      <Filter>src\xenia\base</Filter>
+    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="src\xenia\base\arena.h">

View File

@@ -18,6 +18,21 @@
 #include "xenia/base/byte_order.h"
 
 namespace xe {
+namespace memory {
+
+// Returns the native page size of the system, in bytes.
+size_t page_size();
+
+enum class PageAccess {
+  kNoAccess = 0,
+  kReadOnly,
+  kReadWrite,
+};
+
+// Sets the access rights for the given block of memory and returns the
+// previous access rights. Both base_address and length will be adjusted to
+// page_size().
+bool Protect(void* base_address, size_t length, PageAccess access,
+             PageAccess* out_old_access);
 
 inline size_t hash_combine(size_t seed) { return seed; }
 
@@ -28,6 +43,10 @@ size_t hash_combine(size_t seed, const T& v, const Ts&... vs) {
   return hash_combine(seed, vs...);
 }
 
+}  // namespace memory
+
+// TODO(benvanik): move into xe::memory::
 constexpr void* low_address(void* address) {
   return (void*)(uint64_t(address) & 0xFFFFFFFF);
 }
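The hunk above is the entire platform-neutral surface this commit introduces: a page-size query, a three-level PageAccess enum, and Protect(). A minimal caller might look like the sketch below; the helper function, region pointer, and sizes are illustrative and not part of the commit.

#include <cstddef>

#include "xenia/base/memory.h"

// Hypothetical helper (not from this commit): temporarily make a
// page-aligned region read-only, then restore whatever protection it had.
void WithRegionReadOnly(void* region, size_t length) {
  xe::memory::PageAccess old_access;
  if (!xe::memory::Protect(region, length,
                           xe::memory::PageAccess::kReadOnly, &old_access)) {
    return;  // Host protection change failed; nothing to undo.
  }
  // ... reads are fine here, writes to the region would fault ...
  xe::memory::Protect(region, length, old_access, nullptr);
}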

View File

@@ -0,0 +1,73 @@
+/**
+ ******************************************************************************
+ * Xenia : Xbox 360 Emulator Research Project                                 *
+ ******************************************************************************
+ * Copyright 2015 Ben Vanik. All rights reserved.                             *
+ * Released under the BSD license - see LICENSE in the root for more details. *
+ ******************************************************************************
+ */
+
+#include "xenia/base/memory.h"
+
+#include "xenia/base/platform_win.h"
+
+namespace xe {
+namespace memory {
+
+size_t page_size() {
+  static size_t value = 0;
+  if (!value) {
+    SYSTEM_INFO si;
+    GetSystemInfo(&si);
+    value = si.dwAllocationGranularity;
+  }
+  return value;
+}
+
+bool Protect(void* base_address, size_t length, PageAccess access,
+             PageAccess* out_old_access) {
+  if (out_old_access) {
+    *out_old_access = PageAccess::kNoAccess;
+  }
+  DWORD new_protect;
+  switch (access) {
+    case PageAccess::kNoAccess:
+      new_protect = PAGE_NOACCESS;
+      break;
+    case PageAccess::kReadOnly:
+      new_protect = PAGE_READONLY;
+      break;
+    case PageAccess::kReadWrite:
+      new_protect = PAGE_READWRITE;
+      break;
+    default:
+      assert_unhandled_case(access);
+      break;
+  }
+  DWORD old_protect = 0;
+  BOOL result = VirtualProtect(base_address, length, new_protect, &old_protect);
+  if (result) {
+    if (out_old_access) {
+      switch (old_protect) {
+        case PAGE_NOACCESS:
+          *out_old_access = PageAccess::kNoAccess;
+          break;
+        case PAGE_READONLY:
+          *out_old_access = PageAccess::kReadOnly;
+          break;
+        case PAGE_READWRITE:
+          *out_old_access = PageAccess::kReadWrite;
+          break;
+        default:
+          assert_unhandled_case(old_protect);
+          break;
+      }
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+}  // namespace memory
+}  // namespace xe
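memory_win.cc is the only backend added here; note that page_size() reports dwAllocationGranularity (typically 64 KiB) rather than dwPageSize, so callers aligning to page_size() are really aligning to allocation granularity. As a rough, hedged sketch of what the abstraction leaves room for on other platforms, a hypothetical memory_posix.cc (not part of this change, purely an assumption about a future backend) could map the same declarations onto sysconf() and mprotect(); one caveat is that mprotect() cannot report the previous protection, so out_old_access cannot be filled in faithfully.

// Hypothetical memory_posix.cc sketch, NOT part of this commit.
#include <sys/mman.h>
#include <unistd.h>

#include <cstdint>

#include "xenia/base/memory.h"

namespace xe {
namespace memory {

size_t page_size() { return static_cast<size_t>(sysconf(_SC_PAGESIZE)); }

bool Protect(void* base_address, size_t length, PageAccess access,
             PageAccess* out_old_access) {
  if (out_old_access) {
    // POSIX has no way to query the prior protection via mprotect(), so the
    // best this sketch can do is report kNoAccess.
    *out_old_access = PageAccess::kNoAccess;
  }
  int prot = PROT_NONE;
  switch (access) {
    case PageAccess::kNoAccess:
      prot = PROT_NONE;
      break;
    case PageAccess::kReadOnly:
      prot = PROT_READ;
      break;
    case PageAccess::kReadWrite:
      prot = PROT_READ | PROT_WRITE;
      break;
  }
  // mprotect() requires a page-aligned start address, so align down and grow
  // the length by the amount the base moved back.
  uintptr_t addr = reinterpret_cast<uintptr_t>(base_address);
  uintptr_t aligned = addr & ~(static_cast<uintptr_t>(page_size()) - 1);
  return mprotect(reinterpret_cast<void*>(aligned), length + (addr - aligned),
                  prot) == 0;
}

}  // namespace memory
}  // namespace xe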

View File

@@ -76,9 +76,6 @@ const size_t max_path = 1024;  // PATH_MAX
 // Launches a web browser to the given URL.
 void LaunchBrowser(const char* url);
 
-// Returns the native page size of the system, in bytes.
-size_t page_size();
-
 }  // namespace xe
 
 #endif  // XENIA_BASE_PLATFORM_H_

View File

@@ -11,16 +11,6 @@
 
 namespace xe {
 
-size_t page_size() {
-  static size_t value = 0;
-  if (!value) {
-    SYSTEM_INFO si;
-    GetSystemInfo(&si);
-    value = si.dwAllocationGranularity;
-  }
-  return value;
-}
-
 void LaunchBrowser(const char* url) {
   ShellExecuteA(NULL, "open", url, NULL, NULL, SW_SHOWNORMAL);
 }

View File

@@ -13,7 +13,6 @@
 #include "xenia/base/byte_order.h"
 #include "xenia/base/math.h"
 #include "xenia/base/memory.h"
-#include "xenia/base/platform_win.h"
 
 namespace BE {
 #include <beaengine/BeaEngine.h>

@@ -105,9 +104,9 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
   // This means we need to round up, which will cause spurious access
   // violations and invalidations.
   // TODO(benvanik): only invalidate if actually within the region?
-  length =
-      xe::round_up(length + (base_address % xe::page_size()), xe::page_size());
-  base_address = base_address - (base_address % xe::page_size());
+  length = xe::round_up(length + (base_address % xe::memory::page_size()),
+                        xe::memory::page_size());
+  base_address = base_address - (base_address % xe::memory::page_size());
 
   // Add to table. The slot reservation may evict a previous watch, which
   // could include our target, so we do it first.

@@ -122,29 +121,33 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
   write_watch_mutex_.unlock();
 
   // Make the desired range read only under all address spaces.
-  DWORD old_protect;
-  VirtualProtect(physical_membase_ + entry->address, entry->length,
-                 PAGE_READONLY, &old_protect);
-  VirtualProtect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
-                 PAGE_READONLY, &old_protect);
-  VirtualProtect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
-                 PAGE_READONLY, &old_protect);
-  VirtualProtect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
-                 PAGE_READONLY, &old_protect);
+  xe::memory::Protect(physical_membase_ + entry->address, entry->length,
+                      xe::memory::PageAccess::kReadOnly, nullptr);
+  xe::memory::Protect(virtual_membase_ + 0xA0000000 + entry->address,
+                      entry->length, xe::memory::PageAccess::kReadOnly,
+                      nullptr);
+  xe::memory::Protect(virtual_membase_ + 0xC0000000 + entry->address,
+                      entry->length, xe::memory::PageAccess::kReadOnly,
+                      nullptr);
+  xe::memory::Protect(virtual_membase_ + 0xE0000000 + entry->address,
+                      entry->length, xe::memory::PageAccess::kReadOnly,
+                      nullptr);
 
   return reinterpret_cast<uintptr_t>(entry);
 }
 
 void MMIOHandler::ClearWriteWatch(WriteWatchEntry* entry) {
-  DWORD old_protect;
-  VirtualProtect(physical_membase_ + entry->address, entry->length,
-                 PAGE_READWRITE, &old_protect);
-  VirtualProtect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
-                 PAGE_READWRITE, &old_protect);
-  VirtualProtect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
-                 PAGE_READWRITE, &old_protect);
-  VirtualProtect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
-                 PAGE_READWRITE, &old_protect);
+  xe::memory::Protect(physical_membase_ + entry->address, entry->length,
+                      xe::memory::PageAccess::kReadWrite, nullptr);
+  xe::memory::Protect(virtual_membase_ + 0xA0000000 + entry->address,
+                      entry->length, xe::memory::PageAccess::kReadWrite,
+                      nullptr);
+  xe::memory::Protect(virtual_membase_ + 0xC0000000 + entry->address,
+                      entry->length, xe::memory::PageAccess::kReadWrite,
+                      nullptr);
+  xe::memory::Protect(virtual_membase_ + 0xE0000000 + entry->address,
+                      entry->length, xe::memory::PageAccess::kReadWrite,
+                      nullptr);
 }
 
 void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
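The rounding in AddPhysicalWriteWatch exists because write watches can only be applied at host page granularity while guest ranges may start and end anywhere inside a page. A small worked example of that arithmetic, assuming page_size() reports 0x10000 (the allocation-granularity value the new Windows backend returns); the concrete addresses are made up for illustration.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kPageSize = 0x10000;  // Assumed xe::memory::page_size().
  uint32_t base_address = 0x12340;     // Guest range starts mid-page.
  uint32_t length = 0x100;

  // Mirrors the xe::round_up call: grow the length by the misalignment,
  // round up to a whole page, then align the base address down.
  uint32_t padded = length + (base_address % kPageSize);
  uint32_t rounded = (padded + kPageSize - 1) / kPageSize * kPageSize;
  base_address = base_address - (base_address % kPageSize);

  assert(base_address == 0x10000);
  assert(rounded == 0x10000);  // One full 64 KiB page ends up watched.
  return 0;
}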

View File

@@ -52,7 +52,8 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
   // only Protect() on system page granularity.
   stack_size = (stack_size + 0xFFF) & 0xFFFFF000;
   uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
-  uint32_t stack_padding = uint32_t(xe::page_size());  // Host page size.
+  uint32_t stack_padding =
+      uint32_t(xe::memory::page_size());  // Host page size.
   uint32_t actual_stack_size = stack_padding + stack_size;
   bool top_down;
   switch (stack_type) {

View File

@@ -540,7 +540,7 @@ dword_result_t NtQueryInformationFile(
       // TODO(benvanik): use pointer to fs:: entry?
       xe::store_and_swap<uint64_t>(file_info_ptr,
-                                   xe::hash_combine(0, file->path()));
+                                   xe::memory::hash_combine(0, file->path()));
       break;
     case XFilePositionInformation:
       // struct FILE_POSITION_INFORMATION {

View File

@@ -82,7 +82,7 @@ Memory::Memory()
       reserve_address_(0),
       mapping_(0),
       mapping_base_(nullptr) {
-  system_page_size_ = uint32_t(xe::page_size());
+  system_page_size_ = uint32_t(xe::memory::page_size());
   assert_zero(active_memory_);
   active_memory_ = this;
 }

@@ -436,6 +436,18 @@ void Memory::DumpMap() {
   XELOGE("");
 }
 
+xe::memory::PageAccess ToPageAccess(uint32_t protect) {
+  DWORD result = 0;
+  if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
+    return xe::memory::PageAccess::kReadOnly;
+  } else if ((protect & kMemoryProtectRead) &&
+             (protect & kMemoryProtectWrite)) {
+    return xe::memory::PageAccess::kReadWrite;
+  } else {
+    return xe::memory::PageAccess::kNoAccess;
+  }
+}
+
 DWORD ToWin32ProtectFlags(uint32_t protect) {
   DWORD result = 0;
   if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {

@@ -831,14 +843,15 @@ bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
     return false;
   }*/
   // Instead, we just protect it, if we can.
-  if (page_size_ == xe::page_size() ||
-      ((base_page_entry.region_page_count * page_size_) % xe::page_size() ==
-           0) &&
-          ((base_page_number * page_size_) % xe::page_size() == 0)) {
-    DWORD old_protect;
-    if (!VirtualProtect(membase_ + heap_base_ + base_page_number * page_size_,
-                        base_page_entry.region_page_count * page_size_,
-                        PAGE_NOACCESS, &old_protect)) {
+  if (page_size_ == xe::memory::page_size() ||
+      ((base_page_entry.region_page_count * page_size_) %
+               xe::memory::page_size() ==
+           0) &&
+          ((base_page_number * page_size_) % xe::memory::page_size() == 0)) {
+    if (!xe::memory::Protect(
+            membase_ + heap_base_ + base_page_number * page_size_,
+            base_page_entry.region_page_count * page_size_,
+            xe::memory::PageAccess::kNoAccess, nullptr)) {
       XELOGW("BaseHeap::Release failed due to host VirtualProtect failure");
     }
   }

@@ -884,13 +897,12 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
 
   // Attempt host change (hopefully won't fail).
   // We can only do this if our size matches system page granularity.
-  if (page_size_ == xe::page_size() ||
-      ((page_count * page_size_) % xe::page_size() == 0) &&
-          ((start_page_number * page_size_) % xe::page_size() == 0)) {
-    DWORD new_protect = ToWin32ProtectFlags(protect);
-    DWORD old_protect;
-    if (!VirtualProtect(membase_ + heap_base_ + start_page_number * page_size_,
-                        page_count * page_size_, new_protect, &old_protect)) {
+  if (page_size_ == xe::memory::page_size() ||
+      ((page_count * page_size_) % xe::memory::page_size() == 0) &&
+          ((start_page_number * page_size_) % xe::memory::page_size() == 0)) {
+    if (!xe::memory::Protect(
+            membase_ + heap_base_ + start_page_number * page_size_,
+            page_count * page_size_, ToPageAccess(protect), nullptr)) {
       XELOGE("BaseHeap::Protect failed due to host VirtualProtect failure");
       return false;
     }