vk: Handle out of memory errors that are deferred during texture binding

- Set the out-of-memory flag when building a temporary subresource fails.
- Mark textures as requiring reload if they may reference invalidated data.
- TODO: This is overly complicated and could be handled more cleanly with a smarter restructure.
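The mechanism behind these changes is a process-wide status flag: the failure site raises it, and a safe point later in the frame tests and clears it. Below is a minimal sketch of that pattern, assuming a single atomic bitmask and treating the enumerator value as a bit index; the enum name, backing store, and memory ordering here are assumptions for illustration, not the real RPCS3 implementation.

```cpp
#include <atomic>
#include <cstdint>

namespace vk
{
	// Assumed enum name; only the value of out_of_memory comes from this commit.
	enum runtime_state_bits : uint32_t
	{
		out_of_memory = 4
	};

	// Hypothetical backing store; the real implementation may differ.
	inline std::atomic<uint64_t> g_status_flags{ 0 };

	inline void raise_status_interrupt(runtime_state_bits bit)
	{
		g_status_flags.fetch_or(1ull << bit, std::memory_order_relaxed);
	}

	inline bool test_status_interrupt(runtime_state_bits bit)
	{
		return (g_status_flags.load(std::memory_order_relaxed) & (1ull << bit)) != 0;
	}

	inline void clear_status_interrupt(runtime_state_bits bit)
	{
		g_status_flags.fetch_and(~(1ull << bit), std::memory_order_relaxed);
	}
}
```

The point of deferring is that the allocation failure occurs deep inside texture binding, where unwinding is unsafe; the flag lets end() perform recovery at a point where the command stream is in a known state.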
Authored by kd-11 on 2021-07-27 00:16:22 +03:00, committed by kd-11
parent e5029c532b
commit c54ddabe0b
3 changed files with 83 additions and 22 deletions

File 1 of 3

@@ -157,6 +157,7 @@ void VKGSRender::load_texture_env()
 		return false;
 	};
 
+	vk::clear_status_interrupt(vk::out_of_memory);
 	std::lock_guard lock(m_sampler_mutex);
 
 	for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
@@ -382,7 +383,10 @@ void VKGSRender::bind_texture_env()
 			if (view = sampler_state->image_handle; !view)
 			{
 				//Requires update, copy subresource
-				view = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc);
+				if (!(view = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc)))
+				{
+					vk::raise_status_interrupt(vk::out_of_memory);
+				}
 			}
 			else
 			{
@@ -509,8 +513,10 @@ void VKGSRender::bind_texture_env()
 			if (!image_ptr && sampler_state->validate())
 			{
-				image_ptr = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc);
-				m_vertex_textures_dirty[i] = true;
+				if (!(image_ptr = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc)))
+				{
+					vk::raise_status_interrupt(vk::out_of_memory);
+				}
 			}
 
 			if (!image_ptr)
 			{
@@ -630,7 +636,10 @@ void VKGSRender::bind_interpreter_texture_env()
 			if (view = sampler_state->image_handle; !view)
 			{
 				//Requires update, copy subresource
-				view = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc);
+				if (!(view = m_texture_cache.create_temporary_subresource(*m_current_command_buffer, sampler_state->external_subresource_desc)))
+				{
+					vk::raise_status_interrupt(vk::out_of_memory);
+				}
 			}
 			else
 			{
@@ -975,13 +984,40 @@ void VKGSRender::end()
 			ev->gpu_wait(*m_current_command_buffer);
 		}
 
-	if (!m_shader_interpreter.is_interpreter(m_program)) [[likely]]
-	{
-		bind_texture_env();
-	}
-	else
-	{
-		bind_interpreter_texture_env();
-	}
+	int binding_attempts = 0;
+	while (binding_attempts++ < 3)
+	{
+		if (!m_shader_interpreter.is_interpreter(m_program)) [[likely]]
+		{
+			bind_texture_env();
+		}
+		else
+		{
+			bind_interpreter_texture_env();
+		}
+
+		// TODO: Replace OOM tracking with ref-counting to simplify the logic
+		if (!vk::test_status_interrupt(vk::out_of_memory))
+		{
+			break;
+		}
+
+		if (!on_vram_exhausted(rsx::problem_severity::fatal))
+		{
+			// It is not possible to free memory. Just use placeholder textures. Can cause graphics glitches but shouldn't crash otherwise
+			break;
+		}
+
+		if (m_samplers_dirty)
+		{
+			// Reload texture env if referenced objects were invalidated during OOM handling.
+			load_texture_env();
+		}
+		else
+		{
+			// Nothing to reload, only texture cache references held. Simply attempt to bind again.
+			vk::clear_status_interrupt(vk::out_of_memory);
+		}
+	}
 
 	m_texture_cache.release_uncached_temporary_subresources();
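Distilled out of the hunk above, the recovery policy in end() is a small bounded retry loop: bind, test the deferred flag, attempt to free VRAM, then either reload the texture environment or simply retry the bind. Here is a standalone sketch of that shape; the callback parameters are hypothetical stand-ins for the renderer's member functions, not RPCS3 APIs.

```cpp
#include <functional>

// Hypothetical helper illustrating the bind/recover/retry shape used above.
// bind, oom_pending, try_free_vram, needs_reload, reload and clear_flag are
// stand-ins for the renderer's member functions.
inline void bind_with_oom_recovery(
	const std::function<void()>& bind,
	const std::function<bool()>& oom_pending,
	const std::function<bool()>& try_free_vram,
	const std::function<bool()>& needs_reload,
	const std::function<void()>& reload,
	const std::function<void()>& clear_flag,
	int max_attempts = 3)
{
	for (int attempt = 0; attempt < max_attempts; ++attempt)
	{
		bind();

		if (!oom_pending())
		{
			return; // Bound cleanly, nothing to recover.
		}

		if (!try_free_vram())
		{
			return; // Nothing left to evict; placeholder textures remain bound.
		}

		if (needs_reload())
		{
			reload(); // Referenced objects were invalidated; rebuild the texture env.
		}
		else
		{
			clear_flag(); // Only cache references were dropped; retry the bind.
		}
	}
}
```

Bounding the loop at three attempts guards against livelock in the case where eviction keeps reporting progress without ever satisfying the failing allocation.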

File 2 of 3

@@ -838,18 +838,18 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
 	{
 		// Evict some unused textures. Do not evict any active references
 		std::set<u32> exclusion_list;
-		for (auto i = 0; i < rsx::limits::fragment_textures_count; ++i)
-		{
-			const auto& tex = rsx::method_registers.fragment_textures[i];
-			const auto addr = rsx::get_address(tex.offset(), tex.location());
-			exclusion_list.insert(addr);
-		}
-		for (auto i = 0; i < rsx::limits::vertex_textures_count; ++i)
-		{
-			const auto& tex = rsx::method_registers.vertex_textures[i];
-			const auto addr = rsx::get_address(tex.offset(), tex.location());
-			exclusion_list.insert(addr);
-		}
+		auto scan_array = [&](const auto& texture_array)
+		{
+			for (auto i = 0ull; i < texture_array.size(); ++i)
+			{
+				const auto& tex = texture_array[i];
+				const auto addr = rsx::get_address(tex.offset(), tex.location());
+				exclusion_list.insert(addr);
+			}
+		};
+
+		scan_array(rsx::method_registers.fragment_textures);
+		scan_array(rsx::method_registers.vertex_textures);
 
 		// Hold the secondary lock guard to prevent threads from trying to touch access violation handler stuff
 		std::lock_guard lock(m_secondary_cb_guard);
@@ -884,6 +884,30 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
 			surface_cache_relieved = true;
 			m_rtts.free_invalidated(*m_current_command_buffer, severity);
 		}
+
+		if (severity >= rsx::problem_severity::fatal && surface_cache_relieved && !m_samplers_dirty)
+		{
+			// If surface cache was modified destructively, then we must reload samplers touching the surface cache.
+			bool invalidate_samplers = false;
+			auto scan_array = [&](const auto& texture_array, const auto& sampler_states)
+			{
+				for (auto i = 0ull; i < texture_array.size() && !invalidate_samplers; ++i)
+				{
+					if (texture_array[i].enabled() && sampler_states[i])
+					{
+						invalidate_samplers = (sampler_states[i]->upload_context == rsx::texture_upload_context::framebuffer_storage);
+					}
+				}
+			};
+
+			scan_array(rsx::method_registers.fragment_textures, fs_sampler_state);
+			scan_array(rsx::method_registers.vertex_textures, vs_sampler_state);
+
+			if (invalidate_samplers)
+			{
+				m_samplers_dirty.store(true);
+			}
+		}
 	}
 
 	const bool any_cache_relieved = (texture_cache_relieved || surface_cache_relieved);
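Both hunks in this file lean on the same refactor: a generic lambda parameterized over the texture array, so fragment and vertex textures share one traversal instead of two copy-pasted loops. A reduced sketch of the pattern outside the renderer, using simplified stand-in types rather than the real RSX descriptors:

```cpp
#include <cstdint>
#include <set>

// Simplified stand-in for an RSX texture descriptor; illustrative only.
struct texture_ref { uint32_t address; };

int main()
{
	const texture_ref fragment_textures[] = { { 0x1000 }, { 0x2000 } };
	const texture_ref vertex_textures[]   = { { 0x1000 } }; // shares an address with a fragment texture

	std::set<uint32_t> exclusion_list;

	// Generic lambda: one body serves both array types and sizes,
	// replacing the two copy-pasted loops the commit removes.
	auto scan_array = [&](const auto& texture_array)
	{
		for (const auto& tex : texture_array)
		{
			exclusion_list.insert(tex.address);
		}
	};

	scan_array(fragment_textures);
	scan_array(vertex_textures);

	// std::set dedupes the shared address: {0x1000, 0x2000}.
	return exclusion_list.size() == 2 ? 0 : 1;
}
```

Because std::set deduplicates, any address referenced by either stage appears in the exclusion list exactly once, and the eviction pass skips it regardless of which stage holds the reference.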

File 3 of 3

@@ -38,7 +38,8 @@ namespace vk
 	{
 		uninterruptible = 1,
 		heap_dirty = 2,
-		heap_changed = 3
+		heap_changed = 3,
+		out_of_memory = 4
 	};
 
 	const vk::render_device *get_current_renderer();
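With the new enumerator in place, the call sites added across this commit form a complete raise/test/clear lifecycle. The small test below walks through that lifecycle; it assumes the sketched vk helpers from the first example are in scope and is not exercising the real RPCS3 functions.

```cpp
#include <cassert>

// Exercises the raise/test/clear lifecycle this commit wires up,
// using the sketched helpers from the first example (not real RPCS3 APIs).
int main()
{
	// load_texture_env(): start each texture pass with a clean slate.
	vk::clear_status_interrupt(vk::out_of_memory);
	assert(!vk::test_status_interrupt(vk::out_of_memory));

	// bind_*_texture_env(): a failed create_temporary_subresource() defers the error.
	vk::raise_status_interrupt(vk::out_of_memory);

	// end(): the retry loop observes the deferred failure and recovers.
	assert(vk::test_status_interrupt(vk::out_of_memory));
	vk::clear_status_interrupt(vk::out_of_memory);
	return 0;
}
```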