DiscIO: Decompress to file using a larger buffer.

This improves performance by around 20% for me, and the memory use impact
is negligible considering Dolphin is otherwise unusable.
This commit is contained in:
Unknown W. Brackets 2014-11-27 08:34:44 -08:00
parent f2f83a0c60
commit 2635e7d9ea
1 changed file with 9 additions and 6 deletions

View File

@ -311,20 +311,23 @@ bool DecompressBlobToFile(const std::string& infile, const std::string& outfile,
} }
const CompressedBlobHeader &header = reader->GetHeader(); const CompressedBlobHeader &header = reader->GetHeader();
u8* buffer = new u8[header.block_size]; static const size_t BUFFER_BLOCKS = 32;
int progress_monitor = std::max<int>(1, header.num_blocks / 100); size_t buffer_size = header.block_size * BUFFER_BLOCKS;
u8* buffer = new u8[buffer_size];
u32 num_buffers = header.num_blocks / BUFFER_BLOCKS;
int progress_monitor = std::max<int>(1, num_buffers / 100);
bool was_cancelled = false; bool was_cancelled = false;
for (u64 i = 0; i < header.num_blocks; i++) for (u64 i = 0; i < num_buffers; i++)
{ {
if (i % progress_monitor == 0) if (i % progress_monitor == 0)
{ {
was_cancelled = !callback("Unpacking", (float)i / (float)header.num_blocks, arg); was_cancelled = !callback("Unpacking", (float)i / (float)num_buffers, arg);
if (was_cancelled) if (was_cancelled)
break; break;
} }
reader->Read(i * header.block_size, header.block_size, buffer); reader->Read(i * buffer_size, buffer_size, buffer);
f.WriteBytes(buffer, header.block_size); f.WriteBytes(buffer, buffer_size);
} }
delete[] buffer; delete[] buffer;