Merge pull request #444 from DrChat/mmio_fix

Fix MMIO Handler Race Condition
commit c3ecb1bde9
Author: Ben Vanik
Date:   2015-10-23 07:25:34 -07:00

3 changed files with 74 additions and 43 deletions

src/xenia/base/memory.h

@@ -63,6 +63,12 @@ bool DeallocFixed(void* base_address, size_t length,
 bool Protect(void* base_address, size_t length, PageAccess access,
              PageAccess* out_old_access);
 
+// Queries a region of pages for their access rights. On success, the
+// length parameter is updated to the size, in bytes, of the run of
+// consecutive pages, starting at the first page of the region, that
+// share the same access rights.
+bool QueryProtect(void* base_address, size_t& length, PageAccess& access_out);
+
 // Allocates a block of memory for a type with the given alignment.
 // The memory must be freed with AlignedFree.
 template <typename T>
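
Note: the new QueryProtect takes its length as in/out. A minimal caller sketch (hypothetical, not part of this change) of the contract described in the comment above:

  // Hypothetical caller, illustrating the in/out length contract only.
  void DumpRunLength(void* base_address) {
    size_t length = 64 * 1024;  // in: size of the range we care about
    xe::memory::PageAccess access;
    if (xe::memory::QueryProtect(base_address, length, access)) {
      // out: length now spans the run of consecutive pages, starting at
      // the first page of the region, that all share `access`.
    }
  }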

src/xenia/base/memory_win.cc

@@ -50,6 +50,27 @@ DWORD ToWin32ProtectFlags(PageAccess access) {
   }
 }
 
+PageAccess ToXeniaProtectFlags(DWORD access) {
+  if (access & PAGE_GUARD) {
+    // Strip the page guard flag for now...
+    access &= ~PAGE_GUARD;
+  }
+
+  switch (access) {
+    case PAGE_NOACCESS:
+      return PageAccess::kNoAccess;
+    case PAGE_READONLY:
+      return PageAccess::kReadOnly;
+    case PAGE_READWRITE:
+      return PageAccess::kReadWrite;
+    case PAGE_EXECUTE_READWRITE:
+      return PageAccess::kExecuteReadWrite;
+    default:
+      assert_unhandled_case(access);
+      return PageAccess::kNoAccess;
+  }
+}
+
 void* AllocFixed(void* base_address, size_t length,
                  AllocationType allocation_type, PageAccess access) {
   DWORD alloc_type = 0;
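
Note: ToXeniaProtectFlags is the inverse of ToWin32ProtectFlags above for the four protections xenia uses, with PAGE_GUARD masked off before the switch. A quick sanity sketch (illustrative only, not in the commit):

  // Illustrative round trip for a handled protection.
  void RoundTripExample() {
    DWORD win32 = ToWin32ProtectFlags(PageAccess::kReadOnly);  // PAGE_READONLY
    PageAccess back = ToXeniaProtectFlags(win32);              // kReadOnly
    // PAGE_GUARD is stripped first, so a guarded page reports its
    // underlying access instead of tripping assert_unhandled_case:
    PageAccess guarded = ToXeniaProtectFlags(PAGE_READONLY | PAGE_GUARD);
    (void)back;
    (void)guarded;
  }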
@@ -103,26 +124,27 @@ bool Protect(void* base_address, size_t length, PageAccess access,
     return false;
   }
 
   if (out_old_access) {
-    switch (old_protect) {
-      case PAGE_NOACCESS:
-        *out_old_access = PageAccess::kNoAccess;
-        break;
-      case PAGE_READONLY:
-        *out_old_access = PageAccess::kReadOnly;
-        break;
-      case PAGE_READWRITE:
-        *out_old_access = PageAccess::kReadWrite;
-        break;
-      case PAGE_EXECUTE_READWRITE:
-        *out_old_access = PageAccess::kExecuteReadWrite;
-      default:
-        assert_unhandled_case(access);
-        break;
-    }
+    *out_old_access = ToXeniaProtectFlags(old_protect);
   }
 
   return true;
 }
 
+bool QueryProtect(void* base_address, size_t& length, PageAccess& access_out) {
+  access_out = PageAccess::kNoAccess;
+
+  MEMORY_BASIC_INFORMATION info;
+  ZeroMemory(&info, sizeof(info));
+
+  SIZE_T result = VirtualQuery(base_address, &info, length);
+  if (!result) {
+    return false;
+  }
+
+  length = info.RegionSize;
+  access_out = ToXeniaProtectFlags(info.Protect);
+  return true;
+}
+
 FileMappingHandle CreateFileMappingHandle(std::wstring path, size_t length,
                                           PageAccess access, bool commit) {
   DWORD protect =
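
Note: QueryProtect leans on VirtualQuery, whose MEMORY_BASIC_INFORMATION.RegionSize is exactly the length of the run of pages sharing the same attributes, which is what the in/out length contract needs. For reference, the direct Win32 shape (a sketch, not xenia code):

  // Direct Win32 equivalent of the query above (sketch only). Note the
  // canonical third argument to VirtualQuery is the info buffer size.
  MEMORY_BASIC_INFORMATION info = {};
  if (VirtualQuery(base_address, &info, sizeof(info)) != 0) {
    // info.RegionSize: bytes from the region base sharing these
    // attributes; info.Protect: the PAGE_* value, possibly with
    // PAGE_GUARD still set.
  }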

src/xenia/cpu/mmio_handler.cc

@@ -116,33 +116,27 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
   global_critical_region_.mutex().unlock();
 
   // Make the desired range read only under all address spaces.
-  xe::memory::Protect(physical_membase_ + entry->address, entry->length,
-                      xe::memory::PageAccess::kReadOnly, nullptr);
-  xe::memory::Protect(virtual_membase_ + 0xA0000000 + entry->address,
-                      entry->length, xe::memory::PageAccess::kReadOnly,
-                      nullptr);
-  xe::memory::Protect(virtual_membase_ + 0xC0000000 + entry->address,
-                      entry->length, xe::memory::PageAccess::kReadOnly,
-                      nullptr);
-  xe::memory::Protect(virtual_membase_ + 0xE0000000 + entry->address,
-                      entry->length, xe::memory::PageAccess::kReadOnly,
-                      nullptr);
+  memory::Protect(physical_membase_ + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadOnly, nullptr);
+  memory::Protect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadOnly, nullptr);
+  memory::Protect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadOnly, nullptr);
+  memory::Protect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadOnly, nullptr);
 
   return reinterpret_cast<uintptr_t>(entry);
 }
 
 void MMIOHandler::ClearWriteWatch(WriteWatchEntry* entry) {
-  xe::memory::Protect(physical_membase_ + entry->address, entry->length,
-                      xe::memory::PageAccess::kReadWrite, nullptr);
-  xe::memory::Protect(virtual_membase_ + 0xA0000000 + entry->address,
-                      entry->length, xe::memory::PageAccess::kReadWrite,
-                      nullptr);
-  xe::memory::Protect(virtual_membase_ + 0xC0000000 + entry->address,
-                      entry->length, xe::memory::PageAccess::kReadWrite,
-                      nullptr);
-  xe::memory::Protect(virtual_membase_ + 0xE0000000 + entry->address,
-                      entry->length, xe::memory::PageAccess::kReadWrite,
-                      nullptr);
+  memory::Protect(physical_membase_ + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadWrite, nullptr);
+  memory::Protect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadWrite, nullptr);
+  memory::Protect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadWrite, nullptr);
+  memory::Protect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
+                  xe::memory::PageAccess::kReadWrite, nullptr);
 }
 
 void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
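
Note: the same physical page is reachable through the physical base and through the 0xA0000000, 0xC0000000, and 0xE0000000 virtual mirrors, so a watch has to flip protection on every view or a write through an unprotected mirror would slip past the fault handler. A hypothetical helper (not part of this commit) folding the four calls into one loop:

  // Hypothetical helper, illustrative only: apply one protection to
  // every view of a watched physical range.
  void ProtectAllViews(uint8_t* physical_membase, uint8_t* virtual_membase,
                       uint32_t address, size_t length,
                       xe::memory::PageAccess access) {
    xe::memory::Protect(physical_membase + address, length, access, nullptr);
    for (uint32_t mirror : {0xA0000000u, 0xC0000000u, 0xE0000000u}) {
      xe::memory::Protect(virtual_membase + mirror + address, length, access,
                          nullptr);
    }
  }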
@@ -170,17 +164,26 @@ bool MMIOHandler::CheckWriteWatch(X64Context* thread_context,
   }
   std::list<WriteWatchEntry*> pending_invalidates;
   global_critical_region_.mutex().lock();
+
+  // Now that we hold the lock, recheck and see if the pages are still
+  // protected.
+  memory::PageAccess cur_access;
+  size_t page_length = memory::page_size();
+  memory::QueryProtect(reinterpret_cast<void*>(fault_address), page_length,
+                       cur_access);
+  if (cur_access != memory::PageAccess::kReadOnly &&
+      cur_access != memory::PageAccess::kNoAccess) {
+    // Another thread has cleared this write watch. Abort, but unlock first
+    // so later lock acquisitions cannot deadlock.
+    global_critical_region_.mutex().unlock();
+    return true;
+  }
+
   for (auto it = write_watches_.begin(); it != write_watches_.end();) {
     auto entry = *it;
     if (entry->address <= physical_address &&
         entry->address + entry->length > physical_address) {
-      // Hit!
+      // Hit! Remove the write watch.
       pending_invalidates.push_back(entry);
       // TODO(benvanik): outside of lock?
       ClearWriteWatch(entry);
-      auto erase_it = it;
-      ++it;
-      write_watches_.erase(erase_it);
+      it = write_watches_.erase(it);
       continue;
     }
     ++it;
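
Note: the fix itself is a double-check under the lock. The fault only proves the page was protected at the instant of the write; by the time this thread takes the mutex, another thread may already have serviced the same watch and restored kReadWrite, in which case the right move is to release the lock and let the faulting instruction retry. Condensed shape of the new logic (a sketch; the std::mutex and RAII lock are shorthand for the explicit global_critical_region_ lock()/unlock() above):

  #include <mutex>

  // Sketch of the double-checked write-watch service path.
  bool HandleWatchFault(std::mutex& mu, void* fault_address) {
    std::lock_guard<std::mutex> lock(mu);
    size_t page_length = memory::page_size();
    memory::PageAccess access = memory::PageAccess::kNoAccess;
    memory::QueryProtect(fault_address, page_length, access);
    if (access != memory::PageAccess::kReadOnly &&
        access != memory::PageAccess::kNoAccess) {
      return true;  // Another thread already cleared the watch; just retry.
    }
    // Still protected: this thread owns servicing the watch - clear it,
    // restore kReadWrite on all views, and invalidate dependent ranges.
    return true;
  }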