CDVD: Add minimum buffer size to ThreadedFileReader

If the buffers are smaller than the threshold PCSX2 uses to decide whether to time a read as a seek, ThreadedFileReader fails to provide the data in time to meet the very small linear-read time cutoff.
Authored by TellowKrinkle on 2021-03-27 06:20:58 -05:00; committed by lightningterror
parent f3ac8ee464
commit d4f87112ec
2 changed files with 69 additions and 70 deletions
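The gist of the fix, as a minimal standalone sketch: readahead buffers are always grown to at least 128 KiB, so a single buffer can cover the span PCSX2 times as a linear read and sequential chunks can be appended into it instead of each small chunk getting its own allocation. The Buffer struct, the PrepareBuffer helper, and the 2 KiB chunk length below are hypothetical stand-ins for illustration; only the MINIMUM_SIZE value and the max/realloc idea come from the diff that follows.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static constexpr uint32_t MINIMUM_SIZE = 128 * 1024; // same 128 KiB floor the commit introduces

// Hypothetical stand-in for the reader's per-buffer bookkeeping.
struct Buffer
{
	void* ptr = nullptr; // backing allocation
	uint32_t cap = 0;    // allocated capacity in bytes
	uint32_t size = 0;   // valid bytes; 0 means "empty"
};

// Grow a buffer so it can hold the requested chunk, but never less than
// MINIMUM_SIZE, so several sequential chunks can later be appended to it.
static void PrepareBuffer(Buffer& buf, uint32_t chunkLength)
{
	const uint32_t want = std::max(chunkLength, MINIMUM_SIZE);
	if (buf.cap < want)
	{
		buf.ptr = std::realloc(buf.ptr, want);
		buf.cap = want;
	}
	buf.size = 0; // nothing valid until data is actually read in
}

int main()
{
	Buffer buf;
	PrepareBuffer(buf, 2048); // even a 2 KiB chunk gets a 128 KiB buffer
	std::printf("capacity: %u bytes\n", buf.cap);
	std::free(buf.ptr);
	return 0;
}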

ThreadedFileReader.cpp

@@ -16,6 +16,10 @@
 #include "PrecompiledHeader.h"
 #include "ThreadedFileReader.h"
 
+// Make sure buffer size is bigger than the cutoff where PCSX2 emulates a seek
+// If buffers are smaller than that, we can't keep up with linear reads
+static constexpr u32 MINIMUM_SIZE = 128 * 1024;
+
 ThreadedFileReader::ThreadedFileReader()
 {
 	m_readThread = std::thread([](ThreadedFileReader* r){ r->Loop(); }, this);
@@ -86,13 +90,33 @@ void ThreadedFileReader::Loop()
 		if (ok)
 		{
 			// Readahead
-			Chunk blk = ChunkForOffset(requestOffset + requestSize);
-			if (blk.chunkID >= 0)
+			Chunk chunk = ChunkForOffset(requestOffset + requestSize);
+			if (chunk.chunkID >= 0)
 			{
-				(void)GetBlockPtr(blk, true);
-				blk = ChunkForOffset(blk.offset + blk.length);
-				if (blk.chunkID >= 0)
-					(void)GetBlockPtr(blk, true);
+				int buffersFilled = 0;
+				Buffer* buf = GetBlockPtr(chunk);
+				// Cancel readahead if a new request comes in
+				while (buf && !m_requestPtr.load(std::memory_order_acquire))
+				{
+					u32 bufsize = buf->size.load(std::memory_order_relaxed);
+					chunk = ChunkForOffset(buf->offset + bufsize);
+					if (chunk.chunkID < 0)
+						break;
+					if (buf->offset + bufsize != chunk.offset || chunk.length + bufsize > buf->cap)
+					{
+						buffersFilled++;
+						if (buffersFilled >= 2)
+							break;
+						buf = GetBlockPtr(chunk);
+					}
+					else
+					{
+						int amt = ReadChunk(static_cast<char*>(buf->ptr) + bufsize, chunk.chunkID);
+						if (amt <= 0)
+							break;
+						buf->size.store(bufsize + amt, std::memory_order_release);
+					}
+				}
 			}
 		}
@@ -108,11 +132,13 @@ void ThreadedFileReader::Loop()
 	}
 }
 
-ThreadedFileReader::Buffer* ThreadedFileReader::GetBlockPtr(const Chunk& block, bool isReadahead)
+ThreadedFileReader::Buffer* ThreadedFileReader::GetBlockPtr(const Chunk& block)
 {
 	for (int i = 0; i < static_cast<int>(ArraySize(m_buffer)); i++)
 	{
-		if (m_buffer[i].valid.load(std::memory_order_acquire) && m_buffer[i].offset == block.offset)
+		u32 size = m_buffer[i].size.load(std::memory_order_relaxed);
+		u64 offset = m_buffer[i].offset;
+		if (size && offset <= block.offset && offset + size >= block.offset + block.length)
 		{
 			m_nextBuffer = (i + 1) % ArraySize(m_buffer);
 			return m_buffer + i;
@@ -127,17 +153,19 @@ ThreadedFileReader::Buffer* ThreadedFileReader::GetBlockPtr(const Chunk& block,
 		std::unique_lock<std::mutex> lock(m_mtx, std::defer_lock);
 		if (std::this_thread::get_id() == m_readThread.get_id())
 			lock.lock();
-		if (buf.size < block.length)
-			buf.ptr = realloc(buf.ptr, block.length);
-		buf.valid.store(false, std::memory_order_relaxed);
+		u32 size = std::max(block.length, MINIMUM_SIZE);
+		if (buf.cap < size)
+		{
+			buf.ptr = realloc(buf.ptr, size);
+			buf.cap = size;
+		}
+		buf.size.store(0, std::memory_order_relaxed);
 	}
 	int size = ReadChunk(buf.ptr, block.chunkID);
 	if (size > 0)
 	{
 		buf.offset = block.offset;
-		buf.size = size;
-		buf.valid.store(true, std::memory_order_release);
+		buf.size.store(size, std::memory_order_release);
 		m_nextBuffer = (m_nextBuffer + 1) % ArraySize(m_buffer);
 		return &buf;
 	}
@@ -146,63 +174,34 @@ ThreadedFileReader::Buffer* ThreadedFileReader::GetBlockPtr(const Chunk& block,
 
 bool ThreadedFileReader::Decompress(void* target, u64 begin, u32 size)
 {
-	Chunk blk = ChunkForOffset(begin);
 	char* write = static_cast<char*>(target);
 	u32 remaining = size;
-	if (blk.offset != begin)
+	u64 off = begin;
+	while (remaining)
 	{
-		u32 off = begin - blk.offset;
-		u32 len = std::min(blk.length - off, size);
-		// Partial block
-		if (Buffer* buf = GetBlockPtr(blk))
+		Chunk chunk = ChunkForOffset(off);
+		if (m_internalBlockSize || chunk.offset != off || chunk.length > remaining)
 		{
-			if (buf->size < blk.length)
+			Buffer* buf = GetBlockPtr(chunk);
+			if (!buf)
 				return false;
-			write += CopyBlocks(write, static_cast<char*>(buf->ptr) + off, len);
+			u32 bufoff = off - buf->offset;
+			u32 bufsize = buf->size.load(std::memory_order_relaxed);
+			if (bufsize <= bufoff)
+				return false;
+			u32 len = std::min(bufsize - bufoff, remaining);
+			write += CopyBlocks(write, static_cast<char*>(buf->ptr) + bufoff, len);
 			remaining -= len;
-			blk = ChunkForOffset(blk.offset + blk.length);
+			off += len;
 		}
 		else
 		{
-			return false;
-		}
-	}
-	while (blk.length <= remaining)
-	{
-		if (m_requestCancelled.load(std::memory_order_relaxed))
-		{
-			return false;
-		}
-		if (m_internalBlockSize)
-		{
-			if (Buffer* buf = GetBlockPtr(blk))
-			{
-				if (buf->size < blk.length)
-					return false;
-				write += CopyBlocks(write, buf->ptr, blk.length);
-			}
-		}
-		else
-		{
-			int amt = ReadChunk(write, blk.chunkID);
-			if (amt < static_cast<int>(blk.length))
+			int amt = ReadChunk(write, chunk.chunkID);
+			if (amt < static_cast<int>(chunk.length))
 				return false;
-			write += blk.length;
-		}
-		remaining -= blk.length;
-		blk = ChunkForOffset(blk.offset + blk.length);
-	}
-	if (remaining)
-	{
-		if (Buffer* buf = GetBlockPtr(blk))
-		{
-			if (buf->size < remaining)
-				return false;
-			write += CopyBlocks(write, buf->ptr, remaining);
-		}
-		else
-		{
-			return false;
+			write += chunk.length;
+			remaining -= chunk.length;
+			off += chunk.length;
 		}
 	}
 	m_amtRead += write - static_cast<char*>(target);
@@ -218,19 +217,20 @@ bool ThreadedFileReader::TryCachedRead(void*& buffer, u64& offset, u32& size, co
 	for (int i = 0; i < static_cast<int>(ArraySize(m_buffer) * 2); i++)
 	{
 		Buffer& buf = m_buffer[i % ArraySize(m_buffer)];
-		if (!buf.valid.load(std::memory_order_acquire))
+		u32 bufsize = buf.size.load(std::memory_order_acquire);
+		if (!bufsize)
 			continue;
-		if (buf.offset <= offset && buf.offset + buf.size > offset)
+		if (buf.offset <= offset && buf.offset + bufsize > offset)
 		{
 			u32 off = offset - buf.offset;
-			u32 cpysize = std::min(size, buf.size - off);
+			u32 cpysize = std::min(size, bufsize - off);
 			size_t read = CopyBlocks(buffer, static_cast<char*>(buf.ptr) + off, cpysize);
 			m_amtRead += read;
 			size -= cpysize;
 			offset += cpysize;
 			buffer = static_cast<char*>(buffer) + read;
 			if (size == 0)
-				end = buf.offset + buf.size;
+				end = buf.offset + bufsize;
 		}
 		// Do buffers contain the current and next block?
 		if (end > 0 && buf.offset == end)
@@ -345,7 +345,7 @@ void ThreadedFileReader::Close(void)
 {
 	CancelAndWaitUntilStopped();
 	for (auto& buf : m_buffer)
-		buf.valid.store(false, std::memory_order_relaxed);
+		buf.size.store(0, std::memory_order_relaxed);
 	Close2();
 }

ThreadedFileReader.h

@@ -71,8 +71,8 @@ private:
 	{
 		void* ptr = nullptr;
 		u64 offset = 0;
-		u32 size = 0;
-		std::atomic<bool> valid{false};
+		std::atomic<u32> size{0};
+		u32 cap = 0;
 	};
 	/// 2 buffers for readahead (current block, next block)
 	Buffer m_buffer[2];
@@ -98,8 +98,7 @@ private:
 	void Loop();
 	/// Load the given block into one of the `m_buffer` buffers if necessary and return a pointer to its contents if successful
-	/// Writes to `m_status` if `!isReadahead`
-	Buffer* GetBlockPtr(const Chunk& block, bool isReadahead = false);
+	Buffer* GetBlockPtr(const Chunk& block);
 	/// Decompress from offset to size into
 	bool Decompress(void* ptr, u64 offset, u32 size);
 	/// Cancel any inflight read and wait until the thread is no longer doing anything