From 2d55b12cc9a744afd393d0d9e8990155e540f363 Mon Sep 17 00:00:00 2001 From: "Dr. Chat" Date: Tue, 19 Jul 2016 13:59:40 -0500 Subject: [PATCH] Fix a race condition in x64 code cache involving RtlGrowFunctionTable --- src/xenia/cpu/backend/x64/x64_code_cache.cc | 48 ++++++++++++--------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/src/xenia/cpu/backend/x64/x64_code_cache.cc b/src/xenia/cpu/backend/x64/x64_code_cache.cc index 75df2e385..fc90102d2 100644 --- a/src/xenia/cpu/backend/x64/x64_code_cache.cc +++ b/src/xenia/cpu/backend/x64/x64_code_cache.cc @@ -90,7 +90,9 @@ void X64CodeCache::set_indirection_default(uint32_t default_value) { void X64CodeCache::AddIndirection(uint32_t guest_address, uint32_t host_address) { - assert_not_null(indirection_table_base_); + if (!indirection_table_base_) { + return; + } uint32_t* indirection_slot = reinterpret_cast<uint32_t*>( indirection_table_base_ + (guest_address - kIndirectionTableBase)); @@ -158,28 +160,32 @@ void* X64CodeCache::PlaceGuestCode(uint32_t guest_address, void* machine_code, (uint64_t(code_address - generated_code_base_) << 32) | generated_code_offset_, function_info); + + // TODO(DrChat): The following code doesn't really need to be under the + // global lock except for PlaceCode (but it depends on the previous code + // already being run) + + // If we are going above the high water mark of committed memory, commit + // some more. It's ok if multiple threads do this, as redundant commits + // aren't harmful. + size_t old_commit_mark = generated_code_commit_mark_; + if (high_mark > old_commit_mark) { + size_t new_commit_mark = old_commit_mark + 16 * 1024 * 1024; + xe::memory::AllocFixed(generated_code_base_, new_commit_mark, + xe::memory::AllocationType::kCommit, + xe::memory::PageAccess::kExecuteReadWrite); + generated_code_commit_mark_.compare_exchange_strong(old_commit_mark, + new_commit_mark); + } + + // Copy code. 
+ std::memcpy(code_address, machine_code, code_size); + + // Notify subclasses of placed code. + PlaceCode(guest_address, machine_code, code_size, stack_size, code_address, + unwind_reservation); } - // If we are going above the high water mark of committed memory, commit some - // more. It's ok if multiple threads do this, as redundant commits aren't - // harmful. - size_t old_commit_mark = generated_code_commit_mark_; - if (high_mark > old_commit_mark) { - size_t new_commit_mark = old_commit_mark + 16 * 1024 * 1024; - xe::memory::AllocFixed(generated_code_base_, new_commit_mark, - xe::memory::AllocationType::kCommit, - xe::memory::PageAccess::kExecuteReadWrite); - generated_code_commit_mark_.compare_exchange_strong(old_commit_mark, - new_commit_mark); - } - - // Copy code. - std::memcpy(code_address, machine_code, code_size); - - // Notify subclasses of placed code. - PlaceCode(guest_address, machine_code, code_size, stack_size, code_address, - unwind_reservation); - // Now that everything is ready, fix up the indirection table. // Note that we do support code that doesn't have an indirection fixup, so // ignore those when we see them.