diff --git a/libxenia-base.vcxproj b/libxenia-base.vcxproj
index b78d7b4d9..29bc2dfd2 100644
--- a/libxenia-base.vcxproj
+++ b/libxenia-base.vcxproj
@@ -24,7 +24,8 @@
-    <ClCompile Include="src\xenia\base\memory_generic.cc" />
+    <ClCompile Include="src\xenia\base\memory.cc" />
+    <ClCompile Include="src\xenia\base\memory_win.cc" />
diff --git a/libxenia-base.vcxproj.filters b/libxenia-base.vcxproj.filters
index 54bd9ed4a..10b804bdd 100644
--- a/libxenia-base.vcxproj.filters
+++ b/libxenia-base.vcxproj.filters
@@ -36,9 +36,6 @@
src\xenia\base
-    <ClCompile Include="src\xenia\base\memory_generic.cc">
-      <Filter>src\xenia\base</Filter>
-    </ClCompile>
src\xenia\base
@@ -57,6 +54,12 @@
src\xenia\base
+    <ClCompile Include="src\xenia\base\memory.cc">
+      <Filter>src\xenia\base</Filter>
+    </ClCompile>
+    <ClCompile Include="src\xenia\base\memory_win.cc">
+      <Filter>src\xenia\base</Filter>
+    </ClCompile>
diff --git a/src/xenia/base/memory_generic.cc b/src/xenia/base/memory.cc
similarity index 100%
rename from src/xenia/base/memory_generic.cc
rename to src/xenia/base/memory.cc
diff --git a/src/xenia/base/memory.h b/src/xenia/base/memory.h
index 45f6b3422..80931fbe3 100644
--- a/src/xenia/base/memory.h
+++ b/src/xenia/base/memory.h
@@ -18,6 +18,21 @@
#include "xenia/base/byte_order.h"
namespace xe {
+namespace memory {
+
+// Returns the native page size of the system, in bytes.
+size_t page_size();
+
+enum class PageAccess {
+ kNoAccess = 0,
+ kReadOnly,
+ kReadWrite,
+};
+
+// Sets the access rights for the given block of memory and returns the previous
+// access rights. Both base_address and length will be adjusted to page_size().
+bool Protect(void* base_address, size_t length, PageAccess access,
+ PageAccess* out_old_access);
inline size_t hash_combine(size_t seed) { return seed; }
@@ -28,6 +43,10 @@ size_t hash_combine(size_t seed, const T& v, const Ts&... vs) {
return hash_combine(seed, vs...);
}
+} // namespace memory
+
+// TODO(benvanik): move into xe::memory::
+
constexpr void* low_address(void* address) {
return (void*)(uint64_t(address) & 0xFFFFFFFF);
}
diff --git a/src/xenia/base/memory_win.cc b/src/xenia/base/memory_win.cc
new file mode 100644
index 000000000..c95d1f078
--- /dev/null
+++ b/src/xenia/base/memory_win.cc
@@ -0,0 +1,73 @@
+/**
+ ******************************************************************************
+ * Xenia : Xbox 360 Emulator Research Project *
+ ******************************************************************************
+ * Copyright 2015 Ben Vanik. All rights reserved. *
+ * Released under the BSD license - see LICENSE in the root for more details. *
+ ******************************************************************************
+ */
+
+#include "xenia/base/memory.h"
+
+#include "xenia/base/platform_win.h"
+
+namespace xe {
+namespace memory {
+
+size_t page_size() {
+ static size_t value = 0;
+ if (!value) {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ value = si.dwAllocationGranularity;
+ }
+ return value;
+}
+
+bool Protect(void* base_address, size_t length, PageAccess access,
+ PageAccess* out_old_access) {
+ if (out_old_access) {
+ *out_old_access = PageAccess::kNoAccess;
+ }
+ DWORD new_protect = PAGE_NOACCESS;
+ switch (access) {
+ case PageAccess::kNoAccess:
+ new_protect = PAGE_NOACCESS;
+ break;
+ case PageAccess::kReadOnly:
+ new_protect = PAGE_READONLY;
+ break;
+ case PageAccess::kReadWrite:
+ new_protect = PAGE_READWRITE;
+ break;
+ default:
+ assert_unhandled_case(access);
+ break;
+ }
+ DWORD old_protect = 0;
+ BOOL result = VirtualProtect(base_address, length, new_protect, &old_protect);
+ if (result) {
+ if (out_old_access) {
+ switch (old_protect) {
+ case PAGE_NOACCESS:
+ *out_old_access = PageAccess::kNoAccess;
+ break;
+ case PAGE_READONLY:
+ *out_old_access = PageAccess::kReadOnly;
+ break;
+ case PAGE_READWRITE:
+ *out_old_access = PageAccess::kReadWrite;
+ break;
+ default:
+ assert_unhandled_case(old_protect);
+ break;
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+} // namespace memory
+} // namespace xe
diff --git a/src/xenia/base/platform.h b/src/xenia/base/platform.h
index 9c4fcbba8..34b151baf 100644
--- a/src/xenia/base/platform.h
+++ b/src/xenia/base/platform.h
@@ -76,9 +76,6 @@ const size_t max_path = 1024; // PATH_MAX
// Launches a web browser to the given URL.
void LaunchBrowser(const char* url);
-// Returns the native page size of the system, in bytes.
-size_t page_size();
-
} // namespace xe
#endif // XENIA_BASE_PLATFORM_H_
diff --git a/src/xenia/base/platform_win.cc b/src/xenia/base/platform_win.cc
index b363edd61..157fefd52 100644
--- a/src/xenia/base/platform_win.cc
+++ b/src/xenia/base/platform_win.cc
@@ -11,16 +11,6 @@
namespace xe {
-size_t page_size() {
- static size_t value = 0;
- if (!value) {
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- value = si.dwAllocationGranularity;
- }
- return value;
-}
-
void LaunchBrowser(const char* url) {
ShellExecuteA(NULL, "open", url, NULL, NULL, SW_SHOWNORMAL);
}
diff --git a/src/xenia/cpu/mmio_handler.cc b/src/xenia/cpu/mmio_handler.cc
index 1f38be10b..6be6992da 100644
--- a/src/xenia/cpu/mmio_handler.cc
+++ b/src/xenia/cpu/mmio_handler.cc
@@ -13,7 +13,6 @@
#include "xenia/base/byte_order.h"
#include "xenia/base/math.h"
#include "xenia/base/memory.h"
-#include "xenia/base/platform_win.h"
namespace BE {
#include <beaengine/BeaEngine.h>
@@ -105,9 +104,9 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
// This means we need to round up, which will cause spurious access
// violations and invalidations.
// TODO(benvanik): only invalidate if actually within the region?
- length =
- xe::round_up(length + (base_address % xe::page_size()), xe::page_size());
- base_address = base_address - (base_address % xe::page_size());
+ length = xe::round_up(length + (base_address % xe::memory::page_size()),
+ xe::memory::page_size());
+ base_address = base_address - (base_address % xe::memory::page_size());
// Add to table. The slot reservation may evict a previous watch, which
// could include our target, so we do it first.
@@ -122,29 +121,33 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
write_watch_mutex_.unlock();
// Make the desired range read only under all address spaces.
- DWORD old_protect;
- VirtualProtect(physical_membase_ + entry->address, entry->length,
- PAGE_READONLY, &old_protect);
- VirtualProtect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
- PAGE_READONLY, &old_protect);
- VirtualProtect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
- PAGE_READONLY, &old_protect);
- VirtualProtect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
- PAGE_READONLY, &old_protect);
+ xe::memory::Protect(physical_membase_ + entry->address, entry->length,
+ xe::memory::PageAccess::kReadOnly, nullptr);
+ xe::memory::Protect(virtual_membase_ + 0xA0000000 + entry->address,
+ entry->length, xe::memory::PageAccess::kReadOnly,
+ nullptr);
+ xe::memory::Protect(virtual_membase_ + 0xC0000000 + entry->address,
+ entry->length, xe::memory::PageAccess::kReadOnly,
+ nullptr);
+ xe::memory::Protect(virtual_membase_ + 0xE0000000 + entry->address,
+ entry->length, xe::memory::PageAccess::kReadOnly,
+ nullptr);
return reinterpret_cast<uintptr_t>(entry);
}
void MMIOHandler::ClearWriteWatch(WriteWatchEntry* entry) {
- DWORD old_protect;
- VirtualProtect(physical_membase_ + entry->address, entry->length,
- PAGE_READWRITE, &old_protect);
- VirtualProtect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
- PAGE_READWRITE, &old_protect);
- VirtualProtect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
- PAGE_READWRITE, &old_protect);
- VirtualProtect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
- PAGE_READWRITE, &old_protect);
+ xe::memory::Protect(physical_membase_ + entry->address, entry->length,
+ xe::memory::PageAccess::kReadWrite, nullptr);
+ xe::memory::Protect(virtual_membase_ + 0xA0000000 + entry->address,
+ entry->length, xe::memory::PageAccess::kReadWrite,
+ nullptr);
+ xe::memory::Protect(virtual_membase_ + 0xC0000000 + entry->address,
+ entry->length, xe::memory::PageAccess::kReadWrite,
+ nullptr);
+ xe::memory::Protect(virtual_membase_ + 0xE0000000 + entry->address,
+ entry->length, xe::memory::PageAccess::kReadWrite,
+ nullptr);
}
void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
diff --git a/src/xenia/cpu/thread_state.cc b/src/xenia/cpu/thread_state.cc
index a839ba8cc..874e194f8 100644
--- a/src/xenia/cpu/thread_state.cc
+++ b/src/xenia/cpu/thread_state.cc
@@ -52,7 +52,8 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
// only Protect() on system page granularity.
stack_size = (stack_size + 0xFFF) & 0xFFFFF000;
uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
- uint32_t stack_padding = uint32_t(xe::page_size()); // Host page size.
+ uint32_t stack_padding =
+ uint32_t(xe::memory::page_size()); // Host page size.
uint32_t actual_stack_size = stack_padding + stack_size;
bool top_down;
switch (stack_type) {
diff --git a/src/xenia/kernel/xboxkrnl_io.cc b/src/xenia/kernel/xboxkrnl_io.cc
index 8ca2b0665..26d52e6b6 100644
--- a/src/xenia/kernel/xboxkrnl_io.cc
+++ b/src/xenia/kernel/xboxkrnl_io.cc
@@ -540,7 +540,7 @@ dword_result_t NtQueryInformationFile(
// TODO(benvanik): use pointer to fs:: entry?
xe::store_and_swap(file_info_ptr,
- xe::hash_combine(0, file->path()));
+ xe::memory::hash_combine(0, file->path()));
break;
case XFilePositionInformation:
// struct FILE_POSITION_INFORMATION {
diff --git a/src/xenia/memory.cc b/src/xenia/memory.cc
index ef3b919d7..05d1945f9 100644
--- a/src/xenia/memory.cc
+++ b/src/xenia/memory.cc
@@ -82,7 +82,7 @@ Memory::Memory()
reserve_address_(0),
mapping_(0),
mapping_base_(nullptr) {
- system_page_size_ = uint32_t(xe::page_size());
+ system_page_size_ = uint32_t(xe::memory::page_size());
assert_zero(active_memory_);
active_memory_ = this;
}
@@ -436,6 +436,18 @@ void Memory::DumpMap() {
XELOGE("");
}
+xe::memory::PageAccess ToPageAccess(uint32_t protect) {
+ // No execute access is modeled here; only the read/write flags matter.
+ if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
+ return xe::memory::PageAccess::kReadOnly;
+ } else if ((protect & kMemoryProtectRead) &&
+ (protect & kMemoryProtectWrite)) {
+ return xe::memory::PageAccess::kReadWrite;
+ } else {
+ return xe::memory::PageAccess::kNoAccess;
+ }
+}
+
DWORD ToWin32ProtectFlags(uint32_t protect) {
DWORD result = 0;
if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
@@ -831,14 +843,15 @@ bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
return false;
}*/
// Instead, we just protect it, if we can.
- if (page_size_ == xe::page_size() ||
- ((base_page_entry.region_page_count * page_size_) % xe::page_size() ==
+ if (page_size_ == xe::memory::page_size() ||
+ ((base_page_entry.region_page_count * page_size_) %
+ xe::memory::page_size() ==
0) &&
- ((base_page_number * page_size_) % xe::page_size() == 0)) {
- DWORD old_protect;
- if (!VirtualProtect(membase_ + heap_base_ + base_page_number * page_size_,
- base_page_entry.region_page_count * page_size_,
- PAGE_NOACCESS, &old_protect)) {
+ ((base_page_number * page_size_) % xe::memory::page_size() == 0)) {
+ if (!xe::memory::Protect(
+ membase_ + heap_base_ + base_page_number * page_size_,
+ base_page_entry.region_page_count * page_size_,
+ xe::memory::PageAccess::kNoAccess, nullptr)) {
XELOGW("BaseHeap::Release failed due to host VirtualProtect failure");
}
}
@@ -884,13 +897,12 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
// Attempt host change (hopefully won't fail).
// We can only do this if our size matches system page granularity.
- if (page_size_ == xe::page_size() ||
- ((page_count * page_size_) % xe::page_size() == 0) &&
- ((start_page_number * page_size_) % xe::page_size() == 0)) {
- DWORD new_protect = ToWin32ProtectFlags(protect);
- DWORD old_protect;
- if (!VirtualProtect(membase_ + heap_base_ + start_page_number * page_size_,
- page_count * page_size_, new_protect, &old_protect)) {
+ if (page_size_ == xe::memory::page_size() ||
+ ((page_count * page_size_) % xe::memory::page_size() == 0) &&
+ ((start_page_number * page_size_) % xe::memory::page_size() == 0)) {
+ if (!xe::memory::Protect(
+ membase_ + heap_base_ + start_page_number * page_size_,
+ page_count * page_size_, ToPageAccess(protect), nullptr)) {
XELOGE("BaseHeap::Protect failed due to host VirtualProtect failure");
return false;
}