rsx: Narrow the race condition window further

- Needs aliased paging to be implemented, or a re-entrant global IO lock, to fix this properly
Author: kd-11
Date: 2017-12-05 14:02:14 +03:00
parent 69eb483089
commit 0b3fbf1d4c
3 changed files with 23 additions and 16 deletions
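
The commit message above names two candidate proper fixes: aliased paging, or a re-entrant global IO lock around guest memory access. Only the second lends itself to a quick sketch. As a rough, hypothetical illustration only (not rpcs3 code; every name below is invented), such a lock boils down to a process-wide recursive mutex that both the fault handler and the flush path acquire, so a flush triggered from inside the handler can re-enter it safely:

#include <mutex>

// Hypothetical sketch of a re-entrant global IO lock.
// A recursive mutex lets the same thread re-acquire the lock, e.g. when a
// flush running under the lock touches protected memory and faults again.
class global_io_lock
{
public:
    static std::recursive_mutex& get()
    {
        static std::recursive_mutex m;
        return m;
    }
};

void flush_sections(/* ... */)
{
    std::lock_guard<std::recursive_mutex> lock(global_io_lock::get());
    // unprotect + write back guest memory while no other thread can touch it
}

void on_access_violation(/* ... */)
{
    std::lock_guard<std::recursive_mutex> lock(global_io_lock::get());
    flush_sections(); // re-acquiring on the same thread is safe
}

int main()
{
    on_access_violation();
}

A plain std::mutex would deadlock as soon as the flush path faults while already holding the lock, which is presumably why the commit message asks for a re-entrant one.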


@@ -939,25 +939,30 @@ namespace rsx
 }

 //TODO: This bit can cause race conditions if other threads are accessing this memory
-//1. Unprotect all memory in case of overlapping pages
+//1. Force readback if surface is not synchronized yet to make unlocked part finish quickly
 for (auto &tex : data.sections_to_flush)
 {
     if (tex->is_locked())
     {
-        tex->unprotect();
+        if (!tex->is_synchronized())
+            tex->copy_texture(true, std::forward<Args>(extras)...);
+
         m_cache[get_block_address(tex->get_section_base())].remove_one();
     }
 }

-//2. Write all the memory
+//TODO: Acquire global io lock here
+
+//2. Unprotect all the memory
 for (auto &tex : data.sections_to_flush)
 {
-    if (!tex->flush(std::forward<Args>(extras)...))
-    {
-        //Missed address, note this
-        //TODO: Lower severity when successful to keep the cache from overworking
-        record_cache_miss(*tex);
-    }
+    tex->unprotect();
+}
+
+//3. Write all the memory
+for (auto &tex : data.sections_to_flush)
+{
+    tex->flush(std::forward<Args>(extras)...);
 }

 //Restore protection on the sections to reprotect
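
The hunk above is the actual narrowing: the expensive GPU readback (copy_texture) now happens while the guest pages are still protected, so the window in which other threads can write those pages only spans the quick unprotect-and-write-back phases. A toy model of that ordering, purely illustrative and with hypothetical names (toy_section, readback_from_gpu, write_to_guest are not rpcs3 code):

// Toy model of the flush ordering above; illustrative only.
#include <vector>

struct toy_section
{
    bool page_protected = true;
    bool synchronized   = false;

    void readback_from_gpu() { synchronized = true; }     // slow GPU -> staging copy
    void unprotect()         { page_protected = false; }  // guest pages become writable
    void write_to_guest()    { /* fast staging -> guest memory copy */ }
};

void flush_all(std::vector<toy_section>& sections)
{
    // 1. Do the expensive readback while every page is still protected.
    for (auto& s : sections)
        if (!s.synchronized) s.readback_from_gpu();

    // (per the TODO in the diff, a global io lock would be taken here)

    // 2. The race window opens here: other threads may now write these pages...
    for (auto& s : sections)
        s.unprotect();

    // 3. ...so only the quick write-back remains inside the window.
    for (auto& s : sections)
        s.write_to_guest();
}

int main()
{
    std::vector<toy_section> sections(2);
    flush_all(sections);
}

The old order opened the window before the slow readback had even started; the new order does not remove the race, which is why the TODO still asks for a global io lock between steps 1 and 2.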


@@ -794,7 +794,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 vk::texture_cache::thrashed_set result;
 {
     std::lock_guard<std::mutex> lock(m_secondary_cb_guard);
-    result = std::move(m_texture_cache.invalidate_address(address, is_writing, false, *m_device, m_secondary_command_buffer, m_memory_type_mapping, m_swap_chain->get_present_queue()));
+    result = std::move(m_texture_cache.invalidate_address(address, is_writing, false, m_secondary_command_buffer, m_memory_type_mapping, m_swap_chain->get_present_queue()));
 }

 if (!result.violation_handled)
@@ -882,7 +882,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
         }
     }

-    m_texture_cache.flush_all(result, *m_device, m_secondary_command_buffer, m_memory_type_mapping, m_swap_chain->get_present_queue());
+    m_texture_cache.flush_all(result, m_secondary_command_buffer, m_memory_type_mapping, m_swap_chain->get_present_queue());

     if (has_queue_ref)
     {
@@ -897,7 +897,7 @@ void VKGSRender::on_notify_memory_unmapped(u32 address_base, u32 size)
 {
     std::lock_guard<std::mutex> lock(m_secondary_cb_guard);
     if (m_texture_cache.invalidate_range(address_base, size, true, true, false,
-        *m_device, m_secondary_command_buffer, m_memory_type_mapping, m_swap_chain->get_present_queue()).violation_handled)
+        m_secondary_command_buffer, m_memory_type_mapping, m_swap_chain->get_present_queue()).violation_handled)
     {
         m_texture_cache.purge_dirty();
         {


@@ -244,12 +244,14 @@ namespace vk
     }
 }

-bool flush(vk::render_device& dev, vk::command_buffer& cmd, vk::memory_type_mapping& memory_types, VkQueue submit_queue)
+bool flush(vk::command_buffer& cmd, vk::memory_type_mapping& memory_types, VkQueue submit_queue)
 {
     if (flushed) return true;

     if (m_device == nullptr)
-        m_device = &dev;
+    {
+        m_device = &cmd.get_command_pool().get_owner();
+    }

     // Return false if a flush occured 'late', i.e we had a miss
     bool result = true;
@@ -883,7 +885,7 @@ namespace vk
 template<typename RsxTextureType>
 sampled_image_descriptor _upload_texture(vk::command_buffer& cmd, RsxTextureType& tex, rsx::vk_render_targets& m_rtts)
 {
-    return upload_texture(cmd, tex, m_rtts, *m_device, cmd, m_memory_types, const_cast<const VkQueue>(m_submit_queue));
+    return upload_texture(cmd, tex, m_rtts, cmd, m_memory_types, const_cast<const VkQueue>(m_submit_queue));
 }

 bool blit(rsx::blit_src_info& src, rsx::blit_dst_info& dst, bool interpolate, rsx::vk_render_targets& m_rtts, vk::command_buffer& cmd)
@@ -924,7 +926,7 @@ namespace vk
     }

     helper(&cmd);
-    return upload_scaled_image(src, dst, interpolate, cmd, m_rtts, helper, *m_device, cmd, m_memory_types, const_cast<const VkQueue>(m_submit_queue));
+    return upload_scaled_image(src, dst, interpolate, cmd, m_rtts, helper, cmd, m_memory_types, const_cast<const VkQueue>(m_submit_queue));
 }

 const u32 get_unreleased_textures_count() const override
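
The VKGSRender.cpp and texture-cache hunks above are all the same mechanical change: rather than threading a vk::render_device& through invalidate_address, flush_all, upload_texture and flush, the cached section now derives its device lazily from the command buffer's pool owner. A stripped-down sketch of that pattern with hypothetical stand-in types (not the real vk:: wrappers):

// Minimal illustration of recovering the owning device from a command buffer,
// so callers no longer need to pass it explicitly. Types are hypothetical.
struct render_device {};

struct command_pool
{
    render_device& owner;
    render_device& get_owner() { return owner; }
};

struct command_buffer
{
    command_pool& pool;
    command_pool& get_command_pool() { return pool; }
};

struct cached_texture_section
{
    render_device* m_device = nullptr;

    bool flush(command_buffer& cmd)
    {
        if (m_device == nullptr)
        {
            // Same idea as the diff: the command buffer already knows its pool,
            // and the pool knows which device created it.
            m_device = &cmd.get_command_pool().get_owner();
        }
        return true;
    }
};

int main()
{
    render_device dev;
    command_pool pool{dev};
    command_buffer cmd{pool};
    cached_texture_section section;
    section.flush(cmd);
}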