Physical write watches -> access watches (read and/or write watching)

Author: Dr. Chat
Date:   2016-03-17 21:55:16 -05:00
parent 1831e7a936
commit 0e3c113375

6 changed files with 189 additions and 103 deletions
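For orientation, here is a caller-side sketch of the API after this change. It is a hypothetical usage example, not code from the diff: the types (xe::cpu::MMIOHandler, WatchType, AccessWatchCallback) come from the headers below, while the include path, function name WatchRange, and my_data are invented for illustration.

    #include <cstddef>
    #include <cstdint>
    #include "xenia/cpu/mmio_handler.h"  // assumed include path

    // Register a one-shot write watch over a physical range. The handler
    // write-protects the pages; the callback runs on the faulting thread the
    // first time the range is written, after which the watch is gone.
    uintptr_t WatchRange(xe::cpu::MMIOHandler* handler, uint32_t guest_address,
                         size_t length, void* my_data) {
      return handler->AddPhysicalAccessWatch(
          guest_address, length, xe::cpu::MMIOHandler::kWatchWrite,
          [](void* context, void* data, uint32_t address) {
            // One-shot: the watch has already been cleared when this runs.
          },
          /* callback_context= */ nullptr, /* callback_data= */ my_data);
    }

A watch that has not fired yet can be removed explicitly with handler->CancelAccessWatch(handle).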

src/xenia/cpu/mmio_handler.cc

@@ -87,13 +87,12 @@ bool MMIOHandler::CheckStore(uint32_t virtual_address, uint32_t value) {
   return false;
 }
 
-uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
-                                             size_t length,
-                                             WriteWatchCallback callback,
-                                             void* callback_context,
-                                             void* callback_data) {
-  uint32_t base_address = guest_address;
-  assert_true(base_address < 0x1FFFFFFF);
+uintptr_t MMIOHandler::AddPhysicalAccessWatch(uint32_t guest_address,
+                                              size_t length, WatchType type,
+                                              AccessWatchCallback callback,
+                                              void* callback_context,
+                                              void* callback_data) {
+  uint32_t base_address = guest_address & 0x1FFFFFFF;
 
   // Can only protect sizes matching system page size.
   // This means we need to round up, which will cause spurious access
@@ -103,32 +102,45 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
                       xe::memory::page_size());
   base_address = base_address - (base_address % xe::memory::page_size());
 
+  auto lock = global_critical_region_.Acquire();
+
   // Add to table. The slot reservation may evict a previous watch, which
   // could include our target, so we do it first.
-  auto entry = new WriteWatchEntry();
+  auto entry = new AccessWatchEntry();
   entry->address = base_address;
   entry->length = uint32_t(length);
   entry->callback = callback;
   entry->callback_context = callback_context;
   entry->callback_data = callback_data;
-  global_critical_region_.mutex().lock();
-  write_watches_.push_back(entry);
-  global_critical_region_.mutex().unlock();
+  access_watches_.push_back(entry);
 
-  // Make the desired range read only under all address spaces.
+  auto page_access = memory::PageAccess::kNoAccess;
+  switch (type) {
+    case kWatchWrite:
+      page_access = memory::PageAccess::kReadOnly;
+      break;
+    case kWatchReadWrite:
+      page_access = memory::PageAccess::kNoAccess;
+      break;
+    default:
+      assert_unhandled_case(type);
+      break;
+  }
+
+  // Protect the range under all address spaces
   memory::Protect(physical_membase_ + entry->address, entry->length,
-                  xe::memory::PageAccess::kReadOnly, nullptr);
+                  page_access, nullptr);
   memory::Protect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
-                  xe::memory::PageAccess::kReadOnly, nullptr);
+                  page_access, nullptr);
   memory::Protect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
-                  xe::memory::PageAccess::kReadOnly, nullptr);
+                  page_access, nullptr);
   memory::Protect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
-                  xe::memory::PageAccess::kReadOnly, nullptr);
+                  page_access, nullptr);
 
   return reinterpret_cast<uintptr_t>(entry);
 }
 
-void MMIOHandler::ClearWriteWatch(WriteWatchEntry* entry) {
+void MMIOHandler::ClearAccessWatch(AccessWatchEntry* entry) {
   memory::Protect(physical_membase_ + entry->address, entry->length,
                   xe::memory::PageAccess::kReadWrite, nullptr);
   memory::Protect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
@@ -139,19 +151,20 @@ void MMIOHandler::ClearWriteWatch(WriteWatchEntry* entry) {
                   xe::memory::PageAccess::kReadWrite, nullptr);
 }
 
-void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
-  auto entry = reinterpret_cast<WriteWatchEntry*>(watch_handle);
+void MMIOHandler::CancelAccessWatch(uintptr_t watch_handle) {
+  auto entry = reinterpret_cast<AccessWatchEntry*>(watch_handle);
+  auto lock = global_critical_region_.Acquire();
 
   // Allow access to the range again.
-  ClearWriteWatch(entry);
+  ClearAccessWatch(entry);
 
   // Remove from table.
-  global_critical_region_.mutex().lock();
-  auto it = std::find(write_watches_.begin(), write_watches_.end(), entry);
-  if (it != write_watches_.end()) {
-    write_watches_.erase(it);
+  auto it = std::find(access_watches_.begin(), access_watches_.end(), entry);
+  assert_false(it == access_watches_.end());
+
+  if (it != access_watches_.end()) {
+    access_watches_.erase(it);
   }
-  global_critical_region_.mutex().unlock();
 
   delete entry;
 }
@@ -159,18 +172,19 @@ void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
 
 void MMIOHandler::InvalidateRange(uint32_t physical_address, size_t length) {
   auto lock = global_critical_region_.Acquire();
 
-  for (auto it = write_watches_.begin(); it != write_watches_.end();) {
+  for (auto it = access_watches_.begin(); it != access_watches_.end();) {
     auto entry = *it;
     if ((entry->address <= physical_address &&
          entry->address + entry->length > physical_address) ||
         (entry->address >= physical_address &&
          entry->address < physical_address + length)) {
       // This watch lies within the range. End it.
-      ClearWriteWatch(entry);
+      ClearAccessWatch(entry);
       entry->callback(entry->callback_context, entry->callback_data,
                       entry->address);
-      it = write_watches_.erase(it);
+      it = access_watches_.erase(it);
+      delete entry;
 
       continue;
     }
@@ -178,50 +192,49 @@ void MMIOHandler::InvalidateRange(uint32_t physical_address, size_t length) {
   }
 }
 
-bool MMIOHandler::CheckWriteWatch(uint64_t fault_address) {
-  uint32_t physical_address = uint32_t(fault_address);
-  if (physical_address > 0x1FFFFFFF) {
-    physical_address &= 0x1FFFFFFF;
-  }
-  std::list<WriteWatchEntry*> pending_invalidates;
-  global_critical_region_.mutex().lock();
-  // Now that we hold the lock, recheck and see if the pages are still
-  // protected.
-  memory::PageAccess cur_access;
-  size_t page_length = memory::page_size();
-  memory::QueryProtect((void*)fault_address, page_length, cur_access);
-  if (cur_access != memory::PageAccess::kReadOnly &&
-      cur_access != memory::PageAccess::kNoAccess) {
-    // Another thread has cleared this write watch. Abort.
-    global_critical_region_.mutex().unlock();
-    return true;
-  }
-  for (auto it = write_watches_.begin(); it != write_watches_.end();) {
+bool MMIOHandler::IsRangeWatched(uint32_t physical_address, size_t length) {
+  auto lock = global_critical_region_.Acquire();
+
+  for (auto it = access_watches_.begin(); it != access_watches_.end(); ++it) {
+    auto entry = *it;
+    if ((entry->address <= physical_address &&
+         entry->address + entry->length > physical_address) ||
+        (entry->address >= physical_address &&
+         entry->address < physical_address + length)) {
+      // This watch lies within the range.
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool MMIOHandler::CheckAccessWatch(uint32_t physical_address) {
+  auto lock = global_critical_region_.Acquire();
+
+  bool hit = false;
+  for (auto it = access_watches_.begin(); it != access_watches_.end();) {
     auto entry = *it;
     if (entry->address <= physical_address &&
         entry->address + entry->length > physical_address) {
-      // Hit! Remove the writewatch.
-      pending_invalidates.push_back(entry);
-      ClearWriteWatch(entry);
-      it = write_watches_.erase(it);
+      // Hit! Remove the watch.
+      hit = true;
+      ClearAccessWatch(entry);
+      entry->callback(entry->callback_context, entry->callback_data,
+                      physical_address);
+
+      it = access_watches_.erase(it);
+      delete entry;
       continue;
     }
     ++it;
   }
-  global_critical_region_.mutex().unlock();
 
-  if (pending_invalidates.empty()) {
+  if (!hit) {
     // Rethrow access violation - range was not being watched.
     return false;
   }
-  while (!pending_invalidates.empty()) {
-    auto entry = pending_invalidates.back();
-    pending_invalidates.pop_back();
-    entry->callback(entry->callback_context, entry->callback_data,
-                    physical_address);
-    delete entry;
-  }
 
   // Range was watched, so lets eat this access violation.
   return true;
 }
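The key mechanism above is the mapping from watch type to page protection: a write watch leaves the pages readable, while a read/write watch must revoke all access so reads fault too. A self-contained sketch of that mapping, using stand-in enums rather than xenia's real xe::memory and MMIOHandler types (ProtectionForWatch is an invented name):

    #include <cassert>

    // Stand-ins for xe::memory::PageAccess and MMIOHandler::WatchType.
    enum class PageAccess { kNoAccess, kReadOnly, kReadWrite };
    enum WatchType { kWatchInvalid = 0, kWatchWrite = 1, kWatchReadWrite = 2 };

    // Mirrors the switch in AddPhysicalAccessWatch: protection must be strict
    // enough that every access the watch cares about raises a page fault.
    PageAccess ProtectionForWatch(WatchType type) {
      switch (type) {
        case kWatchWrite:
          return PageAccess::kReadOnly;   // reads pass, writes fault
        case kWatchReadWrite:
          return PageAccess::kNoAccess;   // reads and writes both fault
        default:
          assert(false && "unhandled watch type");
          return PageAccess::kReadWrite;  // no faults; watch would never fire
      }
    }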
@@ -414,9 +427,33 @@ bool MMIOHandler::ExceptionCallback(Exception* ex) {
     }
   }
   if (!range) {
+    auto fault_address = reinterpret_cast<uint8_t*>(ex->fault_address());
+    uint32_t guest_address = 0;
+    if (fault_address >= virtual_membase_ &&
+        fault_address < physical_membase_) {
+      // Faulting on a virtual address.
+      guest_address = static_cast<uint32_t>(ex->fault_address()) & 0x1FFFFFFF;
+    } else {
+      // Faulting on a physical address.
+      guest_address = static_cast<uint32_t>(ex->fault_address());
+    }
+
+    // HACK: Recheck if the pages are still protected (race condition - another
+    // thread clears the writewatch we just hit)
+    // Do this under the lock so we don't introduce another race condition.
+    auto lock = global_critical_region_.Acquire();
+    memory::PageAccess cur_access;
+    size_t page_length = memory::page_size();
+    memory::QueryProtect((void*)fault_address, page_length, cur_access);
+    if (cur_access != memory::PageAccess::kReadOnly &&
+        cur_access != memory::PageAccess::kNoAccess) {
+      // Another thread has cleared this write watch. Abort.
+      return true;
+    }
+
     // Access is not found within any range, so fail and let the caller handle
     // it (likely by aborting).
-    return CheckWriteWatch(ex->fault_address());
+    return CheckAccessWatch(guest_address);
   }
 
   auto rip = ex->pc();
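The recheck in ExceptionCallback closes a narrow race: two threads can fault on the same watched page, and whichever one takes the lock second must notice that the first already serviced the watch and restored protection, then swallow its fault instead of re-firing. A condensed sketch of that test, with stand-in declarations for the xe::memory helpers the hunk calls (WatchStillArmed is an invented name):

    #include <cstddef>

    // Stand-ins mirroring the xe::memory helpers used in the hunk above.
    enum class PageAccess { kNoAccess, kReadOnly, kReadWrite };
    size_t page_size();
    bool QueryProtect(void* address, size_t length, PageAccess& access_out);

    // Called with the global critical region held. Returns true if the pages
    // are still protected, i.e. the watch is live and this thread should fire
    // it; false means another thread already handled it.
    bool WatchStillArmed(void* fault_address) {
      PageAccess cur_access = PageAccess::kReadWrite;
      QueryProtect(fault_address, page_size(), cur_access);
      return cur_access == PageAccess::kReadOnly ||
             cur_access == PageAccess::kNoAccess;
    }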

src/xenia/cpu/mmio_handler.h

@@ -28,9 +28,8 @@ typedef uint32_t (*MMIOReadCallback)(void* ppc_context, void* callback_context,
                                      uint32_t addr);
 typedef void (*MMIOWriteCallback)(void* ppc_context, void* callback_context,
                                   uint32_t addr, uint32_t value);
-
-typedef void (*WriteWatchCallback)(void* context_ptr, void* data_ptr,
-                                   uint32_t address);
+typedef void (*AccessWatchCallback)(void* context_ptr, void* data_ptr,
+                                    uint32_t address);
 
 struct MMIORange {
   uint32_t address;
@@ -46,6 +45,12 @@ class MMIOHandler {
  public:
   virtual ~MMIOHandler();
 
+  enum WatchType {
+    kWatchInvalid = 0,
+    kWatchWrite = 1,
+    kWatchReadWrite = 2,
+  };
+
   static std::unique_ptr<MMIOHandler> Install(uint8_t* virtual_membase,
                                               uint8_t* physical_membase,
                                               uint8_t* membase_end);
@@ -59,17 +64,24 @@ class MMIOHandler {
   bool CheckLoad(uint32_t virtual_address, uint32_t* out_value);
   bool CheckStore(uint32_t virtual_address, uint32_t value);
 
-  uintptr_t AddPhysicalWriteWatch(uint32_t guest_address, size_t length,
-                                  WriteWatchCallback callback,
-                                  void* callback_context, void* callback_data);
-  void CancelWriteWatch(uintptr_t watch_handle);
+  // Memory watches: These are one-shot alarms that fire a callback (in the
+  // context of the thread that caused the callback) when a memory range is
+  // either written to or read from, depending on the watch type. These fire as
+  // soon as a read/write happens, and only fire once.
+  // These watches may be spuriously fired if memory is accessed nearby.
+  uintptr_t AddPhysicalAccessWatch(uint32_t guest_address, size_t length,
+                                   WatchType type, AccessWatchCallback callback,
+                                   void* callback_context, void* callback_data);
+  void CancelAccessWatch(uintptr_t watch_handle);
   void InvalidateRange(uint32_t physical_address, size_t length);
+  bool IsRangeWatched(uint32_t physical_address, size_t length);
 
  protected:
-  struct WriteWatchEntry {
+  struct AccessWatchEntry {
     uint32_t address;
     uint32_t length;
-    WriteWatchCallback callback;
+    WatchType type;
+    AccessWatchCallback callback;
     void* callback_context;
     void* callback_data;
   };
@@ -83,8 +95,8 @@ class MMIOHandler {
   static bool ExceptionCallbackThunk(Exception* ex, void* data);
   bool ExceptionCallback(Exception* ex);
 
-  void ClearWriteWatch(WriteWatchEntry* entry);
-  bool CheckWriteWatch(uint64_t fault_address);
+  void ClearAccessWatch(AccessWatchEntry* entry);
+  bool CheckAccessWatch(uint32_t guest_address);
 
   uint8_t* virtual_membase_;
   uint8_t* physical_membase_;
@@ -94,7 +106,7 @@ class MMIOHandler {
   xe::global_critical_region global_critical_region_;
   // TODO(benvanik): data structure magic.
-  std::list<WriteWatchEntry*> write_watches_;
+  std::list<AccessWatchEntry*> access_watches_;
 
   static MMIOHandler* global_handler_;
 };
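Since the header documents these as one-shot alarms, a consumer that wants continuous notification has to re-arm inside its callback. That re-arm pattern is implied by the API rather than shown anywhere in this commit; the wrapper below is hypothetical (RearmState and ArmWatch are invented, and the include path is assumed):

    #include <cstddef>
    #include <cstdint>
    #include "xenia/cpu/mmio_handler.h"  // assumed include path

    // Hypothetical state for a self-re-arming write watch.
    struct RearmState {
      xe::cpu::MMIOHandler* handler;
      uint32_t guest_address;
      size_t length;
    };

    void ArmWatch(RearmState* state) {
      state->handler->AddPhysicalAccessWatch(
          state->guest_address, state->length,
          xe::cpu::MMIOHandler::kWatchWrite,
          [](void* context, void* /*data*/, uint32_t address) {
            auto state = reinterpret_cast<RearmState*>(context);
            // ... react to the write at `address` ...
            // The one-shot watch is already cleared; arm a fresh one. Writes
            // landing between the fault and this call go unobserved.
            ArmWatch(state);
          },
          /* callback_context= */ state, /* callback_data= */ nullptr);
    }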

src/xenia/gpu/gl4/texture_cache.cc

@@ -427,7 +427,7 @@ TextureCache::TextureEntry* TextureCache::LookupOrInsertTexture(
   // Not found, create.
   auto entry = std::make_unique<TextureEntry>();
   entry->texture_info = texture_info;
-  entry->write_watch_handle = 0;
+  entry->access_watch_handle = 0;
   entry->pending_invalidation = false;
   entry->handle = 0;
@@ -442,6 +442,7 @@ TextureCache::TextureEntry* TextureCache::LookupOrInsertTexture(
       // Found! Acquire the handle and remove the readbuffer entry.
       read_buffer_textures_.erase(it);
       entry->handle = read_buffer_entry->handle;
+      entry->access_watch_handle = read_buffer_entry->access_watch_handle;
       delete read_buffer_entry;
       // TODO(benvanik): set more texture properties? swizzle/etc?
       auto entry_ptr = entry.get();
@@ -495,14 +496,15 @@ TextureCache::TextureEntry* TextureCache::LookupOrInsertTexture(
   // Add a write watch. If any data in the given range is touched we'll get a
   // callback and evict the texture. We could reuse the storage, though the
   // driver is likely in a better position to pool that kind of stuff.
-  entry->write_watch_handle = memory_->AddPhysicalWriteWatch(
+  entry->access_watch_handle = memory_->AddPhysicalAccessWatch(
       texture_info.guest_address, texture_info.input_length,
+      cpu::MMIOHandler::kWatchWrite,
       [](void* context_ptr, void* data_ptr, uint32_t address) {
         auto self = reinterpret_cast<TextureCache*>(context_ptr);
         auto touched_entry = reinterpret_cast<TextureEntry*>(data_ptr);
         // Clear watch handle first so we don't redundantly
         // remove.
-        touched_entry->write_watch_handle = 0;
+        touched_entry->access_watch_handle = 0;
         touched_entry->pending_invalidation = true;
         // Add to pending list so Scavenge will clean it up.
         self->invalidated_textures_mutex_.lock();
@@ -574,14 +576,27 @@ GLuint TextureCache::ConvertTexture(Blitter* blitter, uint32_t guest_address,
                          dest_rect, GL_LINEAR, swap_channels);
   }
 
-  // HACK: remove texture from write watch list so readback won't kill us.
-  // Not needed now, as readback is disabled.
-  /*
-  if (texture_entry->write_watch_handle) {
-    memory_->CancelWriteWatch(texture_entry->write_watch_handle);
-    texture_entry->write_watch_handle = 0;
+  // Setup a read/write access watch. If the game tries to touch the memory
+  // we were supposed to populate with this texture, then we'll actually
+  // populate it.
+  if (texture_entry->access_watch_handle) {
+    memory_->CancelAccessWatch(texture_entry->access_watch_handle);
+    texture_entry->access_watch_handle = 0;
   }
-  //*/
+
+  texture_entry->access_watch_handle = memory_->AddPhysicalAccessWatch(
+      guest_address, texture_entry->texture_info.input_length,
+      cpu::MMIOHandler::kWatchReadWrite,
+      [](void* context, void* data, uint32_t address) {
+        auto touched_entry = reinterpret_cast<TextureEntry*>(data);
+        touched_entry->access_watch_handle = 0;
+
+        // This happens. RDR resolves to a texture then upsizes it, BF1943
+        // writes to a resolved texture.
+        // TODO (for Vulkan): Copy this texture back into system memory.
+        // assert_always();
+      },
+      nullptr, texture_entry);
 
   return texture_entry->handle;
 }
@@ -618,6 +633,20 @@ GLuint TextureCache::ConvertTexture(Blitter* blitter, uint32_t guest_address,
     entry->block_height = block_height;
     entry->format = format;
 
+    entry->access_watch_handle = memory_->AddPhysicalAccessWatch(
+        guest_address, block_height * block_width * 4,
+        cpu::MMIOHandler::kWatchReadWrite,
+        [](void* context, void* data, uint32_t address) {
+          auto entry = reinterpret_cast<ReadBufferTexture*>(data);
+          entry->access_watch_handle = 0;
+
+          // This happens. RDR resolves to a texture then upsizes it, BF1943
+          // writes to a resolved texture.
+          // TODO (for Vulkan): Copy this texture back into system memory.
+          // assert_always();
+        },
+        nullptr, entry.get());
+
     glCreateTextures(GL_TEXTURE_2D, 1, &entry->handle);
     glTextureParameteri(entry->handle, GL_TEXTURE_BASE_LEVEL, 0);
     glTextureParameteri(entry->handle, GL_TEXTURE_MAX_LEVEL, 1);
@@ -636,9 +665,9 @@ GLuint TextureCache::ConvertTexture(Blitter* blitter, uint32_t guest_address,
 }
 
 void TextureCache::EvictTexture(TextureEntry* entry) {
-  if (entry->write_watch_handle) {
-    memory_->CancelWriteWatch(entry->write_watch_handle);
-    entry->write_watch_handle = 0;
+  if (entry->access_watch_handle) {
+    memory_->CancelAccessWatch(entry->access_watch_handle);
+    entry->access_watch_handle = 0;
   }
 
   for (auto& view : entry->views) {
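The new read/write watches on resolved textures are scaffolding for a lazy readback path: if the CPU touches memory whose contents exist only in a GPU texture, the cache should copy the texture back before the access completes. The commit leaves that as a TODO; below is a sketch of the intended flow, with a hypothetical DownloadTextureToGuestMemory helper and invented OnResolvedTextureTouched name:

    #include <cstdint>

    struct ReadBufferTexture;  // stand-in for the cache entry in the diff

    // Hypothetical GPU -> guest-RAM copy, e.g. glGetTextureImage into the
    // physical pages backing the guest address range.
    void DownloadTextureToGuestMemory(ReadBufferTexture* entry);

    // Shape of what the "TODO (for Vulkan)" callback would do.
    void OnResolvedTextureTouched(ReadBufferTexture* entry, uint32_t address) {
      // The one-shot watch cleared itself before invoking us, so the pages
      // are accessible again and this copy will not re-fault.
      DownloadTextureToGuestMemory(entry);
      // No re-arm: guest memory is now authoritative until the next resolve
      // re-creates the texture (and with it, a fresh watch).
    }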

src/xenia/gpu/gl4/texture_cache.h

@@ -44,7 +44,7 @@ class TextureCache {
   };
   struct TextureEntry {
     TextureInfo texture_info;
-    uintptr_t write_watch_handle;
+    uintptr_t access_watch_handle;
     GLuint handle;
     bool pending_invalidation;
     std::vector<std::unique_ptr<TextureEntryView>> views;
@@ -74,8 +74,12 @@ class TextureCache {
                       TextureFormat format, bool swap_channels,
                       GLuint src_texture, Rect2D src_rect, Rect2D dest_rect);
 
+  TextureEntry* LookupAddress(uint32_t guest_address, uint32_t width,
+                              uint32_t height, TextureFormat format);
+
  private:
   struct ReadBufferTexture {
+    uintptr_t access_watch_handle;
     uint32_t guest_address;
     uint32_t logical_width;
     uint32_t logical_height;
@@ -90,8 +94,6 @@ class TextureCache {
   void EvictSampler(SamplerEntry* entry);
   TextureEntry* LookupOrInsertTexture(const TextureInfo& texture_info,
                                       uint64_t opt_hash = 0);
-  TextureEntry* LookupAddress(uint32_t guest_address, uint32_t width,
-                              uint32_t height, TextureFormat format);
   void EvictTexture(TextureEntry* entry);
 
   bool UploadTexture2D(GLuint texture, const TextureInfo& texture_info);

src/xenia/memory.cc

@@ -376,17 +376,19 @@ cpu::MMIORange* Memory::LookupVirtualMappedRange(uint32_t virtual_address) {
   return mmio_handler_->LookupRange(virtual_address);
 }
 
-uintptr_t Memory::AddPhysicalWriteWatch(uint32_t physical_address,
-                                        uint32_t length,
-                                        cpu::WriteWatchCallback callback,
-                                        void* callback_context,
-                                        void* callback_data) {
-  return mmio_handler_->AddPhysicalWriteWatch(
-      physical_address, length, callback, callback_context, callback_data);
+uintptr_t Memory::AddPhysicalAccessWatch(uint32_t physical_address,
+                                         uint32_t length,
+                                         cpu::MMIOHandler::WatchType type,
+                                         cpu::AccessWatchCallback callback,
+                                         void* callback_context,
+                                         void* callback_data) {
+  return mmio_handler_->AddPhysicalAccessWatch(physical_address, length, type,
+                                               callback, callback_context,
+                                               callback_data);
 }
 
-void Memory::CancelWriteWatch(uintptr_t watch_handle) {
-  mmio_handler_->CancelWriteWatch(watch_handle);
+void Memory::CancelAccessWatch(uintptr_t watch_handle) {
+  mmio_handler_->CancelAccessWatch(watch_handle);
 }
 
 uint32_t Memory::SystemHeapAlloc(uint32_t size, uint32_t alignment,
uint32_t Memory::SystemHeapAlloc(uint32_t size, uint32_t alignment, uint32_t Memory::SystemHeapAlloc(uint32_t size, uint32_t alignment,
@@ -453,6 +455,7 @@ bool Memory::Save(ByteStream* stream) {
 }
 
 bool Memory::Restore(ByteStream* stream) {
+  XELOGD("Restoring memory...");
   heaps_.v00000000.Restore(stream);
   heaps_.v40000000.Restore(stream);
   heaps_.v80000000.Restore(stream);
@@ -577,6 +580,8 @@ bool BaseHeap::Save(ByteStream* stream) {
 }
 
 bool BaseHeap::Restore(ByteStream* stream) {
+  XELOGD("Heap %.8X-%.8X", heap_base_, heap_base_ + heap_size_);
+
   for (size_t i = 0; i < page_table_.size(); i++) {
     auto& page = page_table_[i];
     page.qword = stream->Read<uint64_t>();
@@ -897,7 +902,7 @@ bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
   auto base_page_entry = page_table_[base_page_number];
   if (base_page_entry.base_address != base_page_number) {
     XELOGE("BaseHeap::Release failed because address is not a region start");
-    // return false;
+    return false;
   }
 
   if (out_region_size) {

src/xenia/memory.h

@@ -303,12 +303,13 @@ class Memory {
   //
   // This has a significant performance penalty for writes in in the range or
   // nearby (sharing 64KiB pages).
-  uintptr_t AddPhysicalWriteWatch(uint32_t physical_address, uint32_t length,
-                                  cpu::WriteWatchCallback callback,
-                                  void* callback_context, void* callback_data);
+  uintptr_t AddPhysicalAccessWatch(uint32_t physical_address, uint32_t length,
+                                   cpu::MMIOHandler::WatchType type,
+                                   cpu::AccessWatchCallback callback,
+                                   void* callback_context, void* callback_data);
 
-  // Cancels a write watch requested with AddPhysicalWriteWatch.
-  void CancelWriteWatch(uintptr_t watch_handle);
+  // Cancels a write watch requested with AddPhysicalAccessWatch.
+  void CancelAccessWatch(uintptr_t watch_handle);
 
   // Allocates virtual memory from the 'system' heap.
   // System memory is kept separate from game memory but is still accessible
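The performance note above is about page granularity: AddPhysicalAccessWatch rounds the watched range out to host page boundaries, so unrelated accesses sharing those pages fault (and, for these one-shot watches, fire the callback) spuriously. A quick arithmetic sketch of the rounding, assuming 64 KiB pages as in the comment; the exact rounding expression is illustrative, not copied from the source:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t page_size = 0x10000;  // 64 KiB host pages, per the note
      uint32_t guest_address = 0x00123456;
      uint32_t length = 0x100;

      // Snap the start down and the end up to page boundaries, mirroring the
      // rounding done in AddPhysicalAccessWatch.
      uint32_t base = guest_address & ~(page_size - 1);
      uint32_t end = (guest_address + length + page_size - 1) & ~(page_size - 1);

      // A 0x100-byte watch ends up protecting a full 0x10000-byte page, so
      // any other access to that page also traps.
      std::printf("watch [%08X, +%X] protects [%08X, +%X]\n", guest_address,
                  length, base, end - base);
      return 0;
    }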