[Memory/Vulkan] Move old memory watches to the Vulkan backend

Triang3l 2019-08-03 21:06:59 +03:00
parent 0370f8bbd9
commit d20c2fa9da
7 changed files with 192 additions and 355 deletions
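For orientation before the per-file diffs: the commit drops the MMIOHandler's one-shot, per-range access watches and moves the Vulkan texture cache onto the callback-based physical write watches that the D3D12 shared memory code in this diff already uses (Memory::RegisterPhysicalWriteWatch / WatchPhysicalMemoryWrite / UnregisterPhysicalWriteWatch, with the callback returning the range that may safely be unprotected). The sketch below is illustrative only and is not code from this commit; DemoMemory and DemoCache are hypothetical stand-ins, and only the call names and the callback signature are taken from the diff.

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Callback signature as it appears in the diff: returns the physical range
// that may be unprotected in response to the write.
using PhysicalWriteWatchCallback = std::pair<uint32_t, uint32_t> (*)(
    void* context, uint32_t physical_address_start, uint32_t length,
    bool exact_range);

class DemoMemory {
 public:
  void* RegisterPhysicalWriteWatch(PhysicalWriteWatchCallback callback,
                                   void* context) {
    callback_ = callback;
    context_ = context;
    return this;  // Opaque handle, as with Memory::RegisterPhysicalWriteWatch.
  }
  void UnregisterPhysicalWriteWatch(void* /*handle*/) { callback_ = nullptr; }
  // A real implementation write-protects the watched pages here and later
  // unprotects whatever range the callback reports back; omitted in the demo.
  void WatchPhysicalMemoryWrite(uint32_t /*address*/, uint32_t /*size*/) {}
  // Stands in for the access-violation handler hitting a protected page.
  std::pair<uint32_t, uint32_t> SimulateWrite(uint32_t address,
                                              uint32_t length) {
    if (!callback_) {
      return {0, UINT32_MAX};
    }
    return callback_(context_, address, length, /*exact_range=*/false);
  }

 private:
  PhysicalWriteWatchCallback callback_ = nullptr;
  void* context_ = nullptr;
};

class DemoCache {
 public:
  explicit DemoCache(DemoMemory* memory) : memory_(memory) {
    // One callback for the whole cache, registered once (cf. Initialize()).
    handle_ = memory_->RegisterPhysicalWriteWatch(&CallbackThunk, this);
  }
  ~DemoCache() { memory_->UnregisterPhysicalWriteWatch(handle_); }

  // Cf. WatchTexture(): remember the range, then ask the memory system to
  // write-protect it.
  void WatchRange(uint32_t address, uint32_t size) {
    ranges_.push_back({address, size});
    memory_->WatchPhysicalMemoryWrite(address, size);
  }

 private:
  static std::pair<uint32_t, uint32_t> CallbackThunk(void* context,
                                                     uint32_t address,
                                                     uint32_t length,
                                                     bool /*exact_range*/) {
    return static_cast<DemoCache*>(context)->OnWrite(address, length);
  }

  // Cf. MemoryWriteCallback(): invalidate whatever the write touched and tell
  // the memory system which range may be unprotected; (0, UINT32_MAX) means
  // "any range is fine".
  std::pair<uint32_t, uint32_t> OnWrite(uint32_t address, uint32_t length) {
    for (std::size_t i = 0; i < ranges_.size(); ++i) {
      std::pair<uint32_t, uint32_t> range = ranges_[i];
      if (address < range.first + range.second &&
          address + length > range.first) {
        ranges_.erase(ranges_.begin() + i);
        return range;
      }
    }
    return {0, UINT32_MAX};
  }

  DemoMemory* memory_;
  void* handle_ = nullptr;
  std::vector<std::pair<uint32_t, uint32_t>> ranges_;  // {start, size}
};
```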

src/xenia/cpu/mmio_handler.cc

@@ -16,7 +16,6 @@
#include "xenia/base/byte_order.h"
#include "xenia/base/exception_handler.h"
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/memory.h"
namespace xe {
@@ -107,211 +106,6 @@ bool MMIOHandler::CheckStore(uint32_t virtual_address, uint32_t value) {
return false;
}
uintptr_t MMIOHandler::AddPhysicalAccessWatch(uint32_t guest_address,
size_t length, WatchType type,
AccessWatchCallback callback,
void* callback_context,
void* callback_data) {
uint32_t base_address = guest_address & 0x1FFFFFFF;
// Can only protect sizes matching system page size.
// This means we need to round up, which will cause spurious access
// violations and invalidations.
// TODO(benvanik): only invalidate if actually within the region?
length = xe::round_up(length + (base_address % xe::memory::page_size()),
xe::memory::page_size());
base_address = base_address - (base_address % xe::memory::page_size());
auto lock = global_critical_region_.Acquire();
// Fire any access watches that overlap this region.
for (auto it = access_watches_.begin(); it != access_watches_.end();) {
// Case 1: 2222222|222|11111111
// Case 2: 1111111|222|22222222
// Case 3: 1111111|222|11111111 (fragmentation)
// Case 4: 2222222|222|22222222 (complete overlap)
bool hit = false;
auto entry = *it;
if (base_address <= (*it)->address &&
base_address + length > (*it)->address) {
hit = true;
} else if ((*it)->address <= base_address &&
(*it)->address + (*it)->length > base_address) {
hit = true;
} else if ((*it)->address <= base_address &&
(*it)->address + (*it)->length > base_address + length) {
hit = true;
} else if ((*it)->address >= base_address &&
(*it)->address + (*it)->length < base_address + length) {
hit = true;
}
if (hit) {
FireAccessWatch(*it);
it = access_watches_.erase(it);
delete entry;
continue;
}
++it;
}
// Add to table. The slot reservation may evict a previous watch, which
// could include our target, so we do it first.
auto entry = new AccessWatchEntry();
entry->address = base_address;
entry->length = uint32_t(length);
entry->type = type;
entry->callback = callback;
entry->callback_context = callback_context;
entry->callback_data = callback_data;
access_watches_.push_back(entry);
auto page_access = memory::PageAccess::kNoAccess;
switch (type) {
case kWatchWrite:
page_access = memory::PageAccess::kReadOnly;
break;
case kWatchReadWrite:
page_access = memory::PageAccess::kNoAccess;
break;
default:
assert_unhandled_case(type);
break;
}
// Protect the range under all address spaces
memory::Protect(physical_membase_ + entry->address, entry->length,
page_access, nullptr);
memory::Protect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
page_access, nullptr);
memory::Protect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
page_access, nullptr);
memory::Protect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
page_access, nullptr);
return reinterpret_cast<uintptr_t>(entry);
}
void MMIOHandler::FireAccessWatch(AccessWatchEntry* entry) {
ClearAccessWatch(entry);
entry->callback(entry->callback_context, entry->callback_data,
entry->address);
}
void MMIOHandler::ClearAccessWatch(AccessWatchEntry* entry) {
memory::Protect(physical_membase_ + entry->address, entry->length,
xe::memory::PageAccess::kReadWrite, nullptr);
memory::Protect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
xe::memory::PageAccess::kReadWrite, nullptr);
memory::Protect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
xe::memory::PageAccess::kReadWrite, nullptr);
memory::Protect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
xe::memory::PageAccess::kReadWrite, nullptr);
}
void MMIOHandler::CancelAccessWatch(uintptr_t watch_handle) {
auto entry = reinterpret_cast<AccessWatchEntry*>(watch_handle);
auto lock = global_critical_region_.Acquire();
// Allow access to the range again.
ClearAccessWatch(entry);
// Remove from table.
auto it = std::find(access_watches_.begin(), access_watches_.end(), entry);
assert_false(it == access_watches_.end());
if (it != access_watches_.end()) {
access_watches_.erase(it);
}
delete entry;
}
void MMIOHandler::InvalidateRange(uint32_t physical_address_and_heap,
uint32_t length) {
uint32_t heap_relative_address = physical_address_and_heap & 0x1FFFFFFF;
length = std::min(length, 0x20000000u - heap_relative_address);
if (length == 0) {
return;
}
auto lock = global_critical_region_.Acquire();
// Trigger the legacy (per-range) watches.
for (auto it = access_watches_.begin(); it != access_watches_.end();) {
auto entry = *it;
if ((entry->address <= heap_relative_address &&
entry->address + entry->length > heap_relative_address) ||
(entry->address >= heap_relative_address &&
entry->address < heap_relative_address + length)) {
// This watch lies within the range. End it.
FireAccessWatch(entry);
it = access_watches_.erase(it);
delete entry;
continue;
}
++it;
}
}
bool MMIOHandler::IsRangeWatched(uint32_t physical_address, size_t length) {
auto lock = global_critical_region_.Acquire();
for (auto it = access_watches_.begin(); it != access_watches_.end(); ++it) {
auto entry = *it;
if ((entry->address <= physical_address &&
entry->address + entry->length > physical_address + length)) {
// This range lies entirely within this watch.
return true;
}
// TODO(DrChat): Check if the range is partially covered, and subtract the
// covered portion if it is.
if ((entry->address <= physical_address &&
entry->address + entry->length > physical_address)) {
// The beginning of range lies partially within this watch.
} else if ((entry->address < physical_address + length &&
entry->address + entry->length > physical_address + length)) {
// The ending of this range lies partially within this watch.
}
}
return false;
}
bool MMIOHandler::CheckAccessWatch(uint32_t physical_address,
uint32_t heap_address) {
bool hit = false;
// Trigger legacy (per-range) access watches.
// TODO(Triang3l): Remove when legacy watches are deleted.
auto lock = global_critical_region_.Acquire();
for (auto it = access_watches_.begin(); it != access_watches_.end();) {
auto entry = *it;
if (entry->address <= physical_address &&
entry->address + entry->length > physical_address) {
// Hit! Remove the watch.
hit = true;
FireAccessWatch(entry);
it = access_watches_.erase(it);
delete entry;
continue;
}
++it;
}
if (!hit) {
// Rethrow access violation - range was not being watched.
return false;
}
// Range was watched, so let's eat this access violation.
return true;
}
struct DecodedMov {
size_t length;
// Indicates this is a load (or conversely a store).
@@ -514,8 +308,8 @@ bool MMIOHandler::ExceptionCallback(Exception* ex) {
guest_heap_address = 0;
}
// HACK: Recheck if the pages are still protected (race condition - another
// thread clears the writewatch we just hit)
// Recheck if the pages are still protected (race condition - another thread
// clears the writewatch we just hit).
// Do this under the lock so we don't introduce another race condition.
auto lock = global_critical_region_.Acquire();
memory::PageAccess cur_access;
@@ -527,26 +321,22 @@ bool MMIOHandler::ExceptionCallback(Exception* ex) {
return true;
}
// Access is not found within any range, so fail and let the caller handle
// it (likely by aborting).
// TODO(Triang3l): Remove legacy CheckAccessWatch, only call the callback.
bool hit = CheckAccessWatch(guest_address, guest_heap_address);
// The address is not found within any range, so this is either a write watch
// or an actual access violation.
if (access_violation_callback_) {
switch (ex->access_violation_operation()) {
case Exception::AccessViolationOperation::kRead:
hit |= access_violation_callback_(access_violation_callback_context_,
return access_violation_callback_(access_violation_callback_context_,
size_t(ex->fault_address()), false);
break;
case Exception::AccessViolationOperation::kWrite:
hit |= access_violation_callback_(access_violation_callback_context_,
return access_violation_callback_(access_violation_callback_context_,
size_t(ex->fault_address()), true);
break;
default:
// Data Execution Prevention or something else uninteresting.
break;
}
}
return hit;
return false;
}
auto rip = ex->pc();
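An aside on the watch-eviction loops: for non-empty half-open ranges [start, start + length), the four "Case" checks in the removed AddPhysicalAccessWatch above (and in the new TextureCache::WatchTexture later in this commit) are equivalent to the usual interval-overlap test; the containment cases (3 and 4) are already implied by the two partial-overlap cases (1 and 2). A minimal sketch with illustrative names, not code from the xenia sources:

```cpp
#include <cstdint>

// Equivalent form of the four "Case" checks for non-empty half-open ranges:
// two ranges overlap exactly when each one starts before the other ends.
inline bool RangesOverlap(uint32_t a_start, uint32_t a_length,
                          uint32_t b_start, uint32_t b_length) {
  return a_start < b_start + b_length && b_start < a_start + a_length;
}
```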

src/xenia/cpu/mmio_handler.h

@@ -10,7 +10,6 @@
#ifndef XENIA_CPU_MMIO_HANDLER_H_
#define XENIA_CPU_MMIO_HANDLER_H_
#include <list>
#include <memory>
#include <vector>
@@ -48,12 +47,6 @@ class MMIOHandler {
typedef bool (*AccessViolationCallback)(void* context, size_t host_address,
bool is_write);
enum WatchType {
kWatchInvalid = 0,
kWatchWrite = 1,
kWatchReadWrite = 2,
};
// access_violation_callback is called in global_critical_region, so if
// multiple threads trigger an access violation in the same page, the callback
// will be called only once.
@@ -71,37 +64,7 @@
bool CheckLoad(uint32_t virtual_address, uint32_t* out_value);
bool CheckStore(uint32_t virtual_address, uint32_t value);
// Memory watches: These are one-shot alarms that fire a callback (in the
// context of the thread that caused the callback) when a memory range is
// either written to or read from, depending on the watch type. These fire as
// soon as a read/write happens, and only fire once.
// These watches may be spuriously fired if memory is accessed nearby.
// TODO(Triang3l): This is legacy currently used only to support the old
// Vulkan graphics layer. Remove and use WatchPhysicalMemoryWrite instead.
uintptr_t AddPhysicalAccessWatch(uint32_t guest_address, size_t length,
WatchType type, AccessWatchCallback callback,
void* callback_context, void* callback_data);
void CancelAccessWatch(uintptr_t watch_handle);
// Fires and clears any write watches that overlap this range in one heap.
// Unprotecting can be inhibited if this is called right before applying
// different protection to the same range.
void InvalidateRange(uint32_t physical_address_and_heap, uint32_t length);
// Returns true if /all/ of this range is watched.
// TODO(Triang3l): Remove when legacy watches are removed.
bool IsRangeWatched(uint32_t physical_address, size_t length);
protected:
struct AccessWatchEntry {
uint32_t address;
uint32_t length;
WatchType type;
AccessWatchCallback callback;
void* callback_context;
void* callback_data;
};
MMIOHandler(uint8_t* virtual_membase, uint8_t* physical_membase,
uint8_t* membase_end,
AccessViolationCallback access_violation_callback,
@@ -110,10 +73,6 @@ class MMIOHandler {
static bool ExceptionCallbackThunk(Exception* ex, void* data);
bool ExceptionCallback(Exception* ex);
void FireAccessWatch(AccessWatchEntry* entry);
void ClearAccessWatch(AccessWatchEntry* entry);
bool CheckAccessWatch(uint32_t guest_address, uint32_t guest_heap_address);
uint8_t* virtual_membase_;
uint8_t* physical_membase_;
uint8_t* memory_end_;
@@ -123,11 +82,9 @@ class MMIOHandler {
AccessViolationCallback access_violation_callback_;
void* access_violation_callback_context_;
xe::global_critical_region global_critical_region_;
// TODO(benvanik): data structure magic.
std::list<AccessWatchEntry*> access_watches_;
static MMIOHandler* global_handler_;
xe::global_critical_region global_critical_region_;
};
} // namespace cpu

src/xenia/gpu/d3d12/shared_memory.cc

@@ -632,9 +632,8 @@ std::pair<uint32_t, uint32_t> SharedMemory::MemoryWriteCallback(
FireWatches(page_first, page_last, false);
return std::make_pair<uint32_t, uint32_t>(page_first << page_size_log2_,
(page_last - page_first + 1)
<< page_size_log2_);
return std::make_pair(page_first << page_size_log2_,
(page_last - page_first + 1) << page_size_log2_);
}
void SharedMemory::TransitionBuffer(D3D12_RESOURCE_STATES new_state) {

src/xenia/gpu/vulkan/texture_cache.cc

@@ -10,6 +10,8 @@
#include "xenia/gpu/vulkan/texture_cache.h"
#include "xenia/gpu/vulkan/texture_config.h"
#include <algorithm>
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/memory.h"
@@ -147,10 +149,19 @@ VkResult TextureCache::Initialize() {
invalidated_textures_ = &invalidated_textures_sets_[0];
device_queue_ = device_->AcquireQueue(device_->queue_family_index());
physical_write_watch_handle_ =
memory_->RegisterPhysicalWriteWatch(MemoryWriteCallbackThunk, this);
return VK_SUCCESS;
}
void TextureCache::Shutdown() {
if (physical_write_watch_handle_ != nullptr) {
memory_->UnregisterPhysicalWriteWatch(physical_write_watch_handle_);
physical_write_watch_handle_ = nullptr;
}
if (device_queue_) {
device_->ReleaseQueue(device_queue_, device_->queue_family_index());
}
@@ -290,7 +301,7 @@ TextureCache::Texture* TextureCache::AllocateTexture(
texture->alloc_info = vma_info;
texture->framebuffer = nullptr;
texture->usage_flags = image_info.usage;
texture->access_watch_handle = 0;
texture->is_watched = false;
texture->texture_info = texture_info;
return texture;
}
@@ -313,9 +324,19 @@ bool TextureCache::FreeTexture(Texture* texture) {
it = texture->views.erase(it);
}
if (texture->access_watch_handle) {
memory_->CancelAccessWatch(texture->access_watch_handle);
texture->access_watch_handle = 0;
{
auto global_lock = global_critical_region_.Acquire();
if (texture->is_watched) {
for (auto it = watched_textures_.begin();
it != watched_textures_.end();) {
if (it->texture == texture) {
watched_textures_.erase(it);
break;
}
++it;
}
texture->is_watched = false;
}
}
vmaDestroyImage(mem_allocator_, texture->image, texture->alloc);
@@ -323,25 +344,134 @@ bool TextureCache::FreeTexture(Texture* texture) {
return true;
}
void TextureCache::WatchCallback(void* context_ptr, void* data_ptr,
uint32_t address) {
auto self = reinterpret_cast<TextureCache*>(context_ptr);
auto touched_texture = reinterpret_cast<Texture*>(data_ptr);
if (!touched_texture || !touched_texture->access_watch_handle ||
touched_texture->pending_invalidation) {
return;
void TextureCache::WatchTexture(Texture* texture) {
uint32_t address, size;
{
auto global_lock = global_critical_region_.Acquire();
assert_false(texture->is_watched);
WatchedTexture watched_texture;
if (texture->texture_info.memory.base_address &&
texture->texture_info.memory.base_size) {
watched_texture.is_mip = false;
address = texture->texture_info.memory.base_address;
size = texture->texture_info.memory.base_size;
} else if (texture->texture_info.memory.mip_address &&
texture->texture_info.memory.mip_size) {
watched_texture.is_mip = true;
address = texture->texture_info.memory.mip_address;
size = texture->texture_info.memory.mip_size;
} else {
return;
}
watched_texture.texture = texture;
// Fire any access watches that overlap this region.
for (auto it = watched_textures_.begin(); it != watched_textures_.end();) {
// Case 1: 2222222|222|11111111
// Case 2: 1111111|222|22222222
// Case 3: 1111111|222|11111111 (fragmentation)
// Case 4: 2222222|222|22222222 (complete overlap)
Texture* other_texture = it->texture;
uint32_t other_address, other_size;
if (it->is_mip) {
other_address = other_texture->texture_info.memory.mip_address;
other_size = other_texture->texture_info.memory.mip_size;
} else {
other_address = other_texture->texture_info.memory.base_address;
other_size = other_texture->texture_info.memory.base_size;
}
bool hit = false;
if (address <= other_address && address + size > other_address) {
hit = true;
} else if (other_address <= address &&
other_address + other_size > address) {
hit = true;
} else if (other_address <= address &&
other_address + other_size > address + size) {
hit = true;
} else if (other_address >= address &&
other_address + other_size < address + size) {
hit = true;
}
if (hit) {
TextureTouched(other_texture);
it = watched_textures_.erase(it);
continue;
}
++it;
}
watched_textures_.push_back(watched_texture);
}
assert_not_zero(touched_texture->access_watch_handle);
// Clear the watch handle first so we don't redundantly remove it.
touched_texture->access_watch_handle = 0;
touched_texture->pending_invalidation = true;
memory_->WatchPhysicalMemoryWrite(address, size);
}
// Add to pending list so Scavenge will clean it up.
self->invalidated_textures_mutex_.lock();
self->invalidated_textures_->insert(touched_texture);
self->invalidated_textures_mutex_.unlock();
void TextureCache::TextureTouched(Texture* texture) {
if (texture->pending_invalidation) {
return;
}
{
auto global_lock = global_critical_region_.Acquire();
assert_true(texture->is_watched);
texture->is_watched = false;
// Add to pending list so Scavenge will clean it up.
invalidated_textures_->insert(texture);
}
texture->pending_invalidation = true;
}
std::pair<uint32_t, uint32_t> TextureCache::MemoryWriteCallback(
uint32_t physical_address_start, uint32_t length, bool exact_range) {
auto global_lock = global_critical_region_.Acquire();
if (watched_textures_.empty()) {
return std::make_pair<uint32_t, uint32_t>(0, UINT32_MAX);
}
// Get the texture within the range, or otherwise get the gap between two
// adjacent textures that can be safely unwatched.
uint32_t written_range_end = physical_address_start + length;
uint32_t previous_end = 0, next_start = UINT32_MAX;
for (auto it = watched_textures_.begin(); it != watched_textures_.end();) {
Texture* texture = it->texture;
uint32_t texture_address, texture_size;
if (it->is_mip) {
texture_address = texture->texture_info.memory.mip_address;
texture_size = texture->texture_info.memory.mip_size;
} else {
texture_address = texture->texture_info.memory.base_address;
texture_size = texture->texture_info.memory.base_size;
}
if (texture_address >= written_range_end) {
// Completely after the written range.
next_start = std::min(next_start, texture_address);
} else {
uint32_t texture_end = texture_address + texture_size;
if (texture_end <= physical_address_start) {
// Completely before the written range.
previous_end = std::max(previous_end, texture_end);
} else {
// Hit.
TextureTouched(texture);
it = watched_textures_.erase(it);
return std::make_pair(texture_address, texture_size);
}
}
++it;
}
return std::make_pair(previous_end, next_start - previous_end);
}
std::pair<uint32_t, uint32_t> TextureCache::MemoryWriteCallbackThunk(
void* context_ptr, uint32_t physical_address_start, uint32_t length,
bool exact_range) {
return reinterpret_cast<TextureCache*>(context_ptr)
->MemoryWriteCallback(physical_address_start, length, exact_range);
}
TextureCache::Texture* TextureCache::DemandResolveTexture(
@@ -396,15 +526,7 @@ TextureCache::Texture* TextureCache::DemandResolveTexture(
get_dimension_name(texture_info.dimension)));
// Set up an access watch. If this texture is touched, it is destroyed.
if (texture_info.memory.base_address && texture_info.memory.base_size) {
texture->access_watch_handle = memory_->AddPhysicalAccessWatch(
texture_info.memory.base_address, texture_info.memory.base_size,
cpu::MMIOHandler::kWatchWrite, &WatchCallback, this, texture);
} else if (texture_info.memory.mip_address && texture_info.memory.mip_size) {
texture->access_watch_handle = memory_->AddPhysicalAccessWatch(
texture_info.memory.mip_address, texture_info.memory.mip_size,
cpu::MMIOHandler::kWatchWrite, &WatchCallback, this, texture);
}
WatchTexture(texture);
textures_[texture_hash] = texture;
COUNT_profile_set("gpu/texture_cache/textures", textures_.size());
@@ -491,15 +613,7 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
// Okay. Put a writewatch on it to tell us if it's been modified from the
// guest.
if (texture_info.memory.base_address && texture_info.memory.base_size) {
texture->access_watch_handle = memory_->AddPhysicalAccessWatch(
texture_info.memory.base_address, texture_info.memory.base_size,
cpu::MMIOHandler::kWatchWrite, &WatchCallback, this, texture);
} else if (texture_info.memory.mip_address && texture_info.memory.mip_size) {
texture->access_watch_handle = memory_->AddPhysicalAccessWatch(
texture_info.memory.mip_address, texture_info.memory.mip_size,
cpu::MMIOHandler::kWatchWrite, &WatchCallback, this, texture);
}
WatchTexture(texture);
return texture;
}
@@ -1442,15 +1556,17 @@ bool TextureCache::SetupTextureBinding(VkCommandBuffer command_buffer,
}
void TextureCache::RemoveInvalidatedTextures() {
// Clean up any invalidated textures.
invalidated_textures_mutex_.lock();
std::unordered_set<Texture*>& invalidated_textures = *invalidated_textures_;
if (invalidated_textures_ == &invalidated_textures_sets_[0]) {
invalidated_textures_ = &invalidated_textures_sets_[1];
} else {
invalidated_textures_ = &invalidated_textures_sets_[0];
// Clean up any invalidated textures.
{
auto global_lock = global_critical_region_.Acquire();
if (invalidated_textures_ == &invalidated_textures_sets_[0]) {
invalidated_textures_ = &invalidated_textures_sets_[1];
} else {
invalidated_textures_ = &invalidated_textures_sets_[0];
}
}
invalidated_textures_mutex_.unlock();
// Append all invalidated textures to a deletion queue. They will be deleted
// when all command buffers using them have finished executing.
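To make the previous_end/next_start scan in TextureCache::MemoryWriteCallback above concrete, here is a standalone sketch of the same computation with a hypothetical function name and hard-coded ranges; only the return-pair contract (report either the hit range or the unwatchable gap, with (0, UINT32_MAX) meaning "any range is fine") comes from the diff.

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Sketch of the scan: report the watched range a write hits, or the gap
// between the nearest watched ranges so only that gap gets unprotected.
std::pair<uint32_t, uint32_t> UnwatchRangeForWrite(
    const std::vector<std::pair<uint32_t, uint32_t>>& watched,  // {start, size}
    uint32_t write_start, uint32_t write_length) {
  uint32_t write_end = write_start + write_length;
  uint32_t previous_end = 0, next_start = UINT32_MAX;
  for (const auto& range : watched) {
    uint32_t range_end = range.first + range.second;
    if (range.first >= write_end) {
      // Entirely after the written range.
      next_start = std::min(next_start, range.first);
    } else if (range_end <= write_start) {
      // Entirely before the written range.
      previous_end = std::max(previous_end, range_end);
    } else {
      // Hit: this is the range whose texture gets invalidated.
      return {range.first, range.second};
    }
  }
  return {previous_end, next_start - previous_end};
}

// Example: with {0x1000, 0x1000} and {0x8000, 0x1000} watched, a write of
// length 0x10 at 0x5000 misses both and returns {0x2000, 0x6000}: the gap
// between the two watched ranges can safely be unwatched.
```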

src/xenia/gpu/vulkan/texture_cache.h

@@ -10,9 +10,12 @@
#ifndef XENIA_GPU_VULKAN_TEXTURE_CACHE_H_
#define XENIA_GPU_VULKAN_TEXTURE_CACHE_H_
#include <algorithm>
#include <list>
#include <unordered_map>
#include <unordered_set>
#include "xenia/base/mutex.h"
#include "xenia/gpu/register_file.h"
#include "xenia/gpu/sampler_info.h"
#include "xenia/gpu/shader.h"
@@ -50,7 +53,7 @@ class TextureCache {
VkFramebuffer framebuffer; // Blit target frame buffer.
VkImageUsageFlags usage_flags;
uintptr_t access_watch_handle;
bool is_watched;
bool pending_invalidation;
// Pointer to the latest usage fence.
@@ -131,14 +134,24 @@ class TextureCache {
VkSampler sampler;
};
struct WatchedTexture {
Texture* texture;
bool is_mip;
};
// Allocates a new texture and memory to back it on the GPU.
Texture* AllocateTexture(const TextureInfo& texture_info,
VkFormatFeatureFlags required_flags =
VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
bool FreeTexture(Texture* texture);
static void WatchCallback(void* context_ptr, void* data_ptr,
uint32_t address);
void WatchTexture(Texture* texture);
void TextureTouched(Texture* texture);
std::pair<uint32_t, uint32_t> MemoryWriteCallback(
uint32_t physical_address_start, uint32_t length, bool exact_range);
static std::pair<uint32_t, uint32_t> MemoryWriteCallbackThunk(
void* context_ptr, uint32_t physical_address_start, uint32_t length,
bool exact_range);
// Demands a texture. If command_buffer is null and the texture hasn't been
// uploaded to graphics memory already, we will return null and bail.
@@ -207,7 +220,10 @@ class TextureCache {
std::unordered_map<uint64_t, Sampler*> samplers_;
std::list<Texture*> pending_delete_textures_;
std::mutex invalidated_textures_mutex_;
void* physical_write_watch_handle_ = nullptr;
xe::global_critical_region global_critical_region_;
std::list<WatchedTexture> watched_textures_;
std::unordered_set<Texture*>* invalidated_textures_;
std::unordered_set<Texture*> invalidated_textures_sets_[2];
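The pair of invalidated_textures_sets_ above is a small double buffer: TextureTouched inserts into the current set under the global critical region, and RemoveInvalidatedTextures swaps the pointer under the same lock before walking the drained set. A minimal sketch of the pattern follows, with hypothetical names and a plain std::mutex standing in for the global critical region; it is not the texture cache's actual code.

```cpp
#include <mutex>
#include <unordered_set>

// Double-buffered invalidation set: producers insert under the lock, the
// consumer swaps the active set under the lock, then processes the drained
// set without holding it and clears it when done.
struct InvalidatedTextureQueue {
  using Texture = void;  // Opaque key for the sketch.

  void Push(Texture* texture) {
    std::lock_guard<std::mutex> lock(mutex_);
    current_->insert(texture);
  }

  std::unordered_set<Texture*>& Drain() {
    std::lock_guard<std::mutex> lock(mutex_);
    std::unordered_set<Texture*>& drained = *current_;
    current_ = (current_ == &sets_[0]) ? &sets_[1] : &sets_[0];
    return drained;  // New invalidations now land in the other set.
  }

 private:
  std::mutex mutex_;
  std::unordered_set<Texture*> sets_[2];
  std::unordered_set<Texture*>* current_ = &sets_[0];
};
```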

src/xenia/memory.cc

@@ -443,31 +443,12 @@ bool Memory::AccessViolationCallbackThunk(void* context, size_t host_address,
host_address, is_write);
}
uintptr_t Memory::AddPhysicalAccessWatch(uint32_t physical_address,
uint32_t length,
cpu::MMIOHandler::WatchType type,
cpu::AccessWatchCallback callback,
void* callback_context,
void* callback_data) {
return mmio_handler_->AddPhysicalAccessWatch(physical_address, length, type,
callback, callback_context,
callback_data);
}
void Memory::CancelAccessWatch(uintptr_t watch_handle) {
mmio_handler_->CancelAccessWatch(watch_handle);
}
bool Memory::TriggerWatches(uint32_t virtual_address, uint32_t length,
bool is_write, bool unwatch_exact_range,
bool unprotect) {
BaseHeap* heap = LookupHeap(virtual_address);
if (heap == &heaps_.vA0000000 || heap == &heaps_.vC0000000 ||
heap == &heaps_.vE0000000) {
// TODO(Triang3l): Remove InvalidateRange when legacy (old Vulkan renderer)
// watches are removed.
cpu::MMIOHandler::global_handler()->InvalidateRange(virtual_address,
length);
return static_cast<PhysicalHeap*>(heap)->TriggerWatches(
virtual_address, length, is_write, unwatch_exact_range, unprotect);
}
@@ -1456,10 +1437,6 @@ bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
uint32_t parent_base_address = GetPhysicalAddress(base_address);
uint32_t region_size = 0;
if (QuerySize(base_address, &region_size)) {
// TODO(Triang3l): Remove InvalidateRange when legacy (old Vulkan renderer)
// watches are removed.
cpu::MMIOHandler::global_handler()->InvalidateRange(base_address,
region_size);
TriggerWatches(base_address, region_size, true, true,
!cvars::protect_on_release);
}
@@ -1476,9 +1453,6 @@ bool PhysicalHeap::Protect(uint32_t address, uint32_t size, uint32_t protect,
uint32_t* old_protect) {
auto global_lock = global_critical_region_.Acquire();
// TODO(Triang3l): Remove InvalidateRange when legacy (old Vulkan renderer)
// watches are removed.
cpu::MMIOHandler::global_handler()->InvalidateRange(address, size);
TriggerWatches(address, size, true, true, false);
if (!parent_heap_->Protect(GetPhysicalAddress(address), size, protect,

src/xenia/memory.h

@@ -319,21 +319,6 @@ class Memory {
// Gets the defined MMIO range for the given virtual address, if any.
cpu::MMIORange* LookupVirtualMappedRange(uint32_t virtual_address);
// Adds a write watch for the given physical address range that will trigger
// the specified callback whenever any bytes are written in that range.
// The returned handle can be used with CancelAccessWatch to remove the watch
// if it is no longer required.
//
// This has a significant performance penalty for writes in the range or
// nearby (sharing 64KiB pages).
uintptr_t AddPhysicalAccessWatch(uint32_t physical_address, uint32_t length,
cpu::MMIOHandler::WatchType type,
cpu::AccessWatchCallback callback,
void* callback_context, void* callback_data);
// Cancels a write watch requested with AddPhysicalAccessWatch.
void CancelAccessWatch(uintptr_t watch_handle);
// Returns the start and length of the smallest physical memory region
// surrounding the watched region that can be safely unwatched; if it doesn't
// matter, return (0, UINT32_MAX).