
vk_staging_buffer_pool: Fix softlock when stream buffer overflows

There was still a code path that could wait on a timeline semaphore tick
that would never be signalled.

While we are at it, make use of more STL algorithms.
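
Why the old path could hang, in outline: the wraparound branch blocked in scheduler.Wait() on sync ticks that could include the tick currently being recorded, and that tick is only signalled once the pending work is submitted, which cannot happen while the thread is blocked. A minimal sketch of the idea behind the fix, with an illustrative scheduler stand-in (names mirror the diff, but this is not yuzu's actual scheduler API):

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-in for the scheduler's timeline-semaphore bookkeeping.
    struct Scheduler {
        std::uint64_t current_tick = 1;   // tick of the work currently being recorded
        std::uint64_t signalled_tick = 0; // last tick the GPU has actually signalled

        std::uint64_t CurrentTick() const { return current_tick; }
        bool IsFree(std::uint64_t tick) const { return tick <= signalled_tick; }
        // A blocking Wait(CurrentTick()) could never return: the current tick is
        // only signalled after submission, and submission cannot happen while the
        // submitting thread is blocked. That is the softlock this commit removes.
    };

    constexpr std::size_t NUM_SYNCS = 16; // illustrative region count

    // The fix: probe the regions instead of waiting on them; if any region is
    // still in flight, the caller falls back to a dedicated staging buffer.
    bool AreRegionsActive(const Scheduler& scheduler,
                          const std::array<std::uint64_t, NUM_SYNCS>& sync_ticks,
                          std::size_t region_begin, std::size_t region_end) {
        return std::any_of(sync_ticks.begin() + region_begin,
                           sync_ticks.begin() + region_end,
                           [&scheduler](std::uint64_t tick) { return !scheduler.IsFree(tick); });
    }

With the check up front, the pool degrades to a dedicated allocation instead of blocking, which is the same fallback the fast path already used.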
ReinUsesLisp 2021-01-23 17:59:32 -03:00
parent 3a2eefb16c
commit 025fe458ae
2 changed files with 20 additions and 19 deletions

src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp

@@ -142,33 +142,27 @@ void StagingBufferPool::TickFrame() {
 }
 
 StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
-    for (size_t region = Region(free_iterator) + 1,
-                region_end = std::min(Region(iterator + size) + 1, NUM_SYNCS);
-         region < region_end; ++region) {
-        // If we'd have to wait, get a staging buffer to avoid waiting
-        if (!scheduler.IsFree(sync_ticks[region])) {
-            return GetStagingBuffer(size, MemoryUsage::Upload);
-        }
+    if (AreRegionsActive(Region(free_iterator) + 1,
+                         std::min(Region(iterator + size) + 1, NUM_SYNCS))) {
+        // Avoid waiting for the previous usages to be free
+        return GetStagingBuffer(size, MemoryUsage::Upload);
     }
-    for (size_t region = Region(used_iterator), region_end = Region(iterator); region < region_end;
-         ++region) {
-        sync_ticks[region] = scheduler.CurrentTick();
-    }
+    const u64 current_tick = scheduler.CurrentTick();
+    std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
+              current_tick);
     used_iterator = iterator;
+    free_iterator = std::max(free_iterator, iterator + size);
 
-    if (iterator + size > free_iterator) {
-        free_iterator = iterator + size;
-    }
     if (iterator + size > STREAM_BUFFER_SIZE) {
-        for (size_t region = Region(used_iterator); region < NUM_SYNCS; ++region) {
-            sync_ticks[region] = scheduler.CurrentTick();
-        }
+        std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
+                  current_tick);
         used_iterator = 0;
         iterator = 0;
         free_iterator = size;
 
-        for (size_t region = 0, region_end = Region(size); region <= region_end; ++region) {
-            scheduler.Wait(sync_ticks[region]);
+        if (AreRegionsActive(0, Region(size) + 1)) {
+            // Avoid waiting for the previous usages to be free
+            return GetStagingBuffer(size, MemoryUsage::Upload);
         }
     }
     const size_t offset = iterator;
@@ -180,6 +174,11 @@ StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
     };
 }
 
+bool StagingBufferPool::AreRegionsActive(size_t region_begin, size_t region_end) const {
+    return std::any_of(sync_ticks.begin() + region_begin, sync_ticks.begin() + region_end,
+                       [this](u64 sync_tick) { return !scheduler.IsFree(sync_tick); });
+};
+
 StagingBufferRef StagingBufferPool::GetStagingBuffer(size_t size, MemoryUsage usage) {
     if (const std::optional<StagingBufferRef> ref = TryGetReservedBuffer(size, usage)) {
         return *ref;
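
For readers without the rest of the file: the stream buffer is carved into NUM_SYNCS regions whose last-use ticks live in sync_ticks, and Region() maps a byte offset to the region containing it. A sketch of that bookkeeping with illustrative constants (the real definitions live elsewhere in vk_staging_buffer_pool and may differ):

    #include <cstddef>

    // Illustrative values; the actual constants are defined in the pool's sources.
    constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024;
    constexpr std::size_t NUM_SYNCS = 16;
    constexpr std::size_t REGION_SIZE = STREAM_BUFFER_SIZE / NUM_SYNCS;

    // Maps a byte offset within the stream buffer to its sync region index.
    constexpr std::size_t Region(std::size_t offset) noexcept {
        return offset / REGION_SIZE;
    }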

src/video_core/renderer_vulkan/vk_staging_buffer_pool.h

@@ -67,6 +67,8 @@ private:
     StagingBufferRef GetStreamBuffer(size_t size);
 
+    bool AreRegionsActive(size_t region_begin, size_t region_end) const;
+
     StagingBufferRef GetStagingBuffer(size_t size, MemoryUsage usage);
 
     std::optional<StagingBufferRef> TryGetReservedBuffer(size_t size, MemoryUsage usage);