mirror of https://github.com/PCSX2/pcsx2.git
CDVD: Use ThreadedFileReader for gzip ISOs
parent 29e9125b15
commit 7587bb8a07
pcsx2/CDVD/ChunksCache.cpp (deleted)
@@ -1,45 +0,0 @@
// SPDX-FileCopyrightText: 2002-2023 PCSX2 Dev Team
// SPDX-License-Identifier: LGPL-3.0+

#include "ChunksCache.h"

void ChunksCache::SetLimit(uint megabytes)
{
	m_limit = (s64)megabytes * 1024 * 1024;
	MatchLimit();
}

void ChunksCache::MatchLimit(bool removeAll)
{
	std::list<CacheEntry*>::reverse_iterator rit;
	while (!m_entries.empty() && (removeAll || m_size > m_limit))
	{
		rit = m_entries.rbegin();
		m_size -= (*rit)->size;
		delete (*rit);
		m_entries.pop_back();
	}
}

void ChunksCache::Take(void* pMallocedSrc, s64 offset, int length, int coverage)
{
	m_entries.push_front(new CacheEntry(pMallocedSrc, offset, length, coverage));
	m_size += length;
	MatchLimit();
}

// By design, succeed only if the entire request is in a single cached chunk
int ChunksCache::Read(void* pDest, s64 offset, int length)
{
	for (auto it = m_entries.begin(); it != m_entries.end(); it++)
	{
		CacheEntry* e = *it;
		if (e && offset >= e->offset && (offset + length) <= (e->offset + e->coverage))
		{
			if (it != m_entries.begin())
				m_entries.splice(m_entries.begin(), m_entries, it); // Move to top (MRU)
			return CopyAvailable(e->data, e->offset, e->size, pDest, offset, length);
		}
	}
	return -1;
}
pcsx2/CDVD/ChunksCache.h (deleted)
@@ -1,60 +0,0 @@
// SPDX-FileCopyrightText: 2002-2023 PCSX2 Dev Team
// SPDX-License-Identifier: LGPL-3.0+

#pragma once

#include "common/Pcsx2Types.h"

#include <algorithm>
#include <cstring>
#include <list>

class ChunksCache
{
public:
	ChunksCache(uint initialLimitMb)
		: m_entries(0)
		, m_size(0)
		, m_limit(initialLimitMb * 1024 * 1024){};
	~ChunksCache() { Clear(); };
	void SetLimit(uint megabytes);
	void Clear() { MatchLimit(true); };

	void Take(void* pMallocedSrc, s64 offset, int length, int coverage);
	int Read(void* pDest, s64 offset, int length);

	static int CopyAvailable(void* pSrc, s64 srcOffset, int srcSize,
		void* pDst, s64 dstOffset, int maxCopySize)
	{
		int available = std::clamp(maxCopySize, 0, std::max((int)(srcOffset + srcSize - dstOffset), 0));
		std::memcpy(pDst, (char*)pSrc + (dstOffset - srcOffset), available);
		return available;
	};

private:
	class CacheEntry
	{
	public:
		CacheEntry(void* pMallocedSrc, s64 offset, int length, int coverage)
			: data(pMallocedSrc)
			, offset(offset)
			, coverage(coverage)
			, size(length){};

		~CacheEntry()
		{
			if (data)
				free(data);
		};

		void* data;
		s64 offset;
		int coverage;
		int size;
	};

	std::list<CacheEntry*> m_entries;
	void MatchLimit(bool removeAll = false);
	s64 m_size;
	s64 m_limit;
};
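For context, a minimal usage sketch of the cache removed by this commit, written against the ChunksCache declaration above; the sizes and offsets are invented for illustration. Take() hands ownership of a malloc'd buffer to the cache, and Read() only succeeds when the whole request lies inside a single cached chunk, which is then kept at the most-recently-used position.

// Illustrative only, not part of this commit.
#include "ChunksCache.h" // as it existed before this commit

#include <cstdlib>
#include <cstring>

void ChunksCacheUsageSketch()
{
	ChunksCache cache(200); // limit in megabytes

	// Store one 256 KiB decompressed chunk covering offsets [0, 256 KiB).
	const int chunkSize = 256 * 1024;
	void* chunk = std::malloc(chunkSize);
	std::memset(chunk, 0xAA, chunkSize);
	cache.Take(chunk, /*offset=*/0, /*length=*/chunkSize, /*coverage=*/chunkSize); // cache now owns 'chunk'

	char buf[2048];
	// Hit: the request lies entirely inside the cached chunk.
	int hit = cache.Read(buf, /*offset=*/4096, /*length=*/2048); // returns 2048
	// Miss: the request crosses the end of the chunk's coverage, so the caller must decompress instead.
	int miss = cache.Read(buf, /*offset=*/chunkSize - 1024, /*length=*/2048); // returns -1
	(void)hit;
	(void)miss;
}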
pcsx2/CDVD/CsoFileReader.h
@@ -3,17 +3,7 @@

#pragma once

-// Based on testing, the overhead of using this cache is high.
-//
-// The test was done with CSO files using a block size of 16KB.
-// Cache hit rates were observed in the range of 25%.
-// Cache overhead added 35% to the overall read time.
-//
-// For this reason, it's currently disabled.
-#define CSO_USE_CHUNKSCACHE 0
-
#include "ThreadedFileReader.h"
-#include "ChunksCache.h"
#include <zlib.h>

struct CsoHeader;
pcsx2/CDVD/GzippedFileReader.cpp
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: LGPL-3.0+

#include "Config.h"
-#include "ChunksCache.h"
#include "GzippedFileReader.h"
#include "Host.h"
#include "CDVD/zlib_indexed.h"
@@ -32,14 +31,14 @@ static Access* ReadIndexFromFile(const char* filename)
	s64 size;
	if ((size = FileSystem::FSize64(fp.get())) <= 0)
	{
-		Console.Error(fmt::format("Invalid gzip index size: {}", size));
+		ERROR_LOG("Invalid gzip index size: {}", size);
		return nullptr;
	}

	char fileId[GZIP_ID_LEN + 1] = {0};
	if (std::fread(fileId, GZIP_ID_LEN, 1, fp.get()) != 1 || std::memcmp(fileId, GZIP_ID, 4) != 0)
	{
-		Console.Error(fmt::format("Incompatible gzip index, please delete it manually: '{}'", filename));
+		ERROR_LOG("Incompatible gzip index: '{}'", filename);
		return nullptr;
	}

@@ -48,7 +47,7 @@ static Access* ReadIndexFromFile(const char* filename)
	if (std::fread(index, sizeof(Access), 1, fp.get()) != 1 ||
		datasize != static_cast<s64>(index->have) * static_cast<s64>(sizeof(Point)))
	{
-		Console.Error(fmt::format("Unexpected size of gzip index, please delete it manually: '{}'.", filename));
+		ERROR_LOG("Unexpected size of gzip index: '{}'.", filename);
		std::free(index);
		return 0;
	}
@@ -56,7 +55,7 @@ static Access* ReadIndexFromFile(const char* filename)
	char* buffer = static_cast<char*>(std::malloc(datasize));
	if (std::fread(buffer, datasize, 1, fp.get()) != 1)
	{
-		Console.Error(fmt::format("Failed read of gzip index, please delete it manually: '{}'.", filename));
+		ERROR_LOG("Failed read of gzip index: '{}'.", filename);
		std::free(buffer);
		std::free(index);
		return 0;
@@ -68,12 +67,6 @@ static Access* ReadIndexFromFile(const char* filename)

static void WriteIndexToFile(Access* index, const char* filename)
{
-	if (FileSystem::FileExists(filename))
-	{
-		Console.Warning("WARNING: Won't write index - file name exists (please delete it manually): '%s'", filename);
-		return;
-	}
-
	auto fp = FileSystem::OpenManagedCFile(filename, "wb");
	if (!fp)
		return;
@@ -89,13 +82,9 @@ static void WriteIndexToFile(Access* index, const char* filename)

	// Verify
	if (!success)
-	{
-		Console.Warning("Warning: Can't write index file to disk: '%s'", filename);
-	}
+		ERROR_LOG("Warning: Can't write index file to disk: '{}'", filename);
	else
-	{
-		Console.WriteLn(Color_Green, "OK: Gzip quick access index file saved to disk: '%s'", filename);
-	}
+		INFO_LOG("Gzip quick access index file saved to disk: '{}'", filename);
}

static const char* INDEX_TEMPLATE_KEY = "$(f)";
@@ -121,9 +110,9 @@ static std::string ApplyTemplate(const std::string& name, const std::string& bas
		|| first != trimmedTemplate.rfind(INDEX_TEMPLATE_KEY) // more than one instance
		|| (!canEndWithKey && first == trimmedTemplate.length() - std::strlen(INDEX_TEMPLATE_KEY)))
	{
-		Error::SetString(error, fmt::format("Invalid {} template '{}'.\n"
+		Error::SetStringFmt(error, "Invalid {} template '{}'.\n"
							"Template must contain exactly one '%s' and must not end with it. Aborting.",
-			name, trimmedTemplate, INDEX_TEMPLATE_KEY));
+			name, trimmedTemplate, INDEX_TEMPLATE_KEY);
		return {};
	}

@@ -144,119 +133,13 @@ static std::string iso2indexname(const std::string& isoname, Error* error)
	return ApplyTemplate("gzip index", appRoot, Host::GetBaseStringSettingValue("EmuCore", "GzipIsoIndexTemplate", "$(f).pindex.tmp"), isoname, false, error);
}

-GzippedFileReader::GzippedFileReader()
-	: m_cache(GZFILE_CACHE_SIZE_MB)
+GzippedFileReader::GzippedFileReader() = default;
+
+GzippedFileReader::~GzippedFileReader() = default;
+
+bool GzippedFileReader::LoadOrCreateIndex(Error* error)
{
-	m_blocksize = 2048;
-	AsyncPrefetchReset();
-}
-
-GzippedFileReader::~GzippedFileReader()
-{
-	Close();
-}
-
-void GzippedFileReader::InitZstates()
-{
-	if (m_zstates)
-	{
-		delete[] m_zstates;
-		m_zstates = 0;
-	}
-	if (!m_pIndex)
-		return;
-
-	// having another extra element helps avoiding logic for last (so 2+ instead of 1+)
-	int size = 2 + m_pIndex->uncompressed_size / m_pIndex->span;
-	m_zstates = new Czstate[size]();
-}
-
-#ifndef _WIN32
-void GzippedFileReader::AsyncPrefetchReset(){};
-void GzippedFileReader::AsyncPrefetchOpen(){};
-void GzippedFileReader::AsyncPrefetchClose(){};
-void GzippedFileReader::AsyncPrefetchChunk(s64 dummy){};
-void GzippedFileReader::AsyncPrefetchCancel(){};
-#else
-// AsyncPrefetch works as follows:
-// after extracting a chunk from the compressed file, ask the OS to asynchronously
-// read the next chunk from the file, and then completely ignore the result and
-// cancel the async read before the next extract. the next extract then reads the
-// data from the disk buf if it's overlapping/contained within the chunk we've
-// asked the OS to prefetch, then the OS is likely to already have it cached.
-// This procedure is frequently able to overcome seek time due to fragmentation of the
-// compressed file on disk without any meaningful penalty.
-// This system is only enabled for win32 where we have this async read request.
-void GzippedFileReader::AsyncPrefetchReset()
-{
-	hOverlappedFile = INVALID_HANDLE_VALUE;
-	asyncInProgress = false;
-}
-
-void GzippedFileReader::AsyncPrefetchOpen()
-{
-	hOverlappedFile = CreateFile(
-		FileSystem::GetWin32Path(m_filename).c_str(),
-		GENERIC_READ,
-		FILE_SHARE_READ,
-		NULL,
-		OPEN_EXISTING,
-		FILE_FLAG_SEQUENTIAL_SCAN | FILE_FLAG_OVERLAPPED,
-		NULL);
-};
-
-void GzippedFileReader::AsyncPrefetchClose()
-{
-	AsyncPrefetchCancel();
-
-	if (hOverlappedFile != INVALID_HANDLE_VALUE)
-		CloseHandle(hOverlappedFile);
-
-	AsyncPrefetchReset();
-};
-
-void GzippedFileReader::AsyncPrefetchChunk(s64 start)
-{
-	if (hOverlappedFile == INVALID_HANDLE_VALUE || asyncInProgress)
-	{
-		Console.Warning("Unexpected file handle or progress state. Aborting prefetch.");
-		return;
-	}
-
-	LARGE_INTEGER offset;
-	offset.QuadPart = start;
-
-	DWORD bytesToRead = GZFILE_READ_CHUNK_SIZE;
-
-	ZeroMemory(&asyncOperationContext, sizeof(asyncOperationContext));
-	asyncOperationContext.hEvent = 0;
-	asyncOperationContext.Offset = offset.LowPart;
-	asyncOperationContext.OffsetHigh = offset.HighPart;
-
-	ReadFile(hOverlappedFile, mDummyAsyncPrefetchTarget, bytesToRead, NULL, &asyncOperationContext);
-	asyncInProgress = true;
-};
-
-void GzippedFileReader::AsyncPrefetchCancel()
-{
-	if (!asyncInProgress)
-		return;
-
-	if (!CancelIo(hOverlappedFile))
-	{
-		Console.Warning("Canceling gz prefetch failed. Following prefetching will not work.");
-		return;
-	}
-
-	asyncInProgress = false;
-};
-#endif /* _WIN32 */
-
-bool GzippedFileReader::OkIndex(Error* error)
-{
-	if (m_pIndex)
-		return true;

	// Try to read index from disk
	const std::string indexfile(iso2indexname(m_filename, error));
	if (indexfile.empty())
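The gzip index path comes from expanding the $(f) key in the EmuCore/GzipIsoIndexTemplate setting (default "$(f).pindex.tmp") with the ISO's own name. A minimal sketch of that expansion, with an invented helper name and without the validation that ApplyTemplate() performs above:

#include <string>

// Expands the single "$(f)" occurrence in a gzip-index filename template.
// ExpandIndexTemplateSketch("$(f).pindex.tmp", "game.iso") -> "game.iso.pindex.tmp"
static std::string ExpandIndexTemplateSketch(std::string templ, const std::string& isoname)
{
	const std::string key = "$(f)"; // INDEX_TEMPLATE_KEY in the code above
	const std::string::size_type pos = templ.find(key);
	if (pos == std::string::npos)
		return {}; // the real ApplyTemplate() rejects templates without exactly one key
	templ.replace(pos, key.length(), isoname);
	return templ;
}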
@@ -265,17 +148,9 @@ bool GzippedFileReader::OkIndex(Error* error)
		return false;
	}

-	if (FileSystem::FileExists(indexfile.c_str()) && (m_pIndex = ReadIndexFromFile(indexfile.c_str())))
+	if ((m_index = ReadIndexFromFile(indexfile.c_str())) != nullptr)
	{
-		Console.WriteLn(Color_Green, "OK: Gzip quick access index read from disk: '%s'", indexfile.c_str());
-		if (m_pIndex->span != GZFILE_SPAN_DEFAULT)
-		{
-			Console.Warning("Note: This index has %1.1f MB intervals, while the current default for new indexes is %1.1f MB.",
-				(float)m_pIndex->span / 1024 / 1024, (float)GZFILE_SPAN_DEFAULT / 1024 / 1024);
-			Console.Warning("It will work fine, but if you want to generate a new index with default intervals, delete this index file.");
-			Console.Warning("(smaller intervals mean bigger index file and quicker but more frequent decompressions)");
-		}
-		InitZstates();
+		INFO_LOG("Gzip quick access index read from disk: '{}'", indexfile);
		return true;
	}

@@ -290,207 +165,82 @@ bool GzippedFileReader::OkIndex(Error* error)

	if (len >= 0)
	{
-		m_pIndex = index;
-		WriteIndexToFile((Access*)m_pIndex, indexfile.c_str());
+		m_index = index;
+		WriteIndexToFile(m_index, indexfile.c_str());
	}
	else
	{
-		Error::SetString(error, fmt::format("ERROR ({}): Index could not be generated for file '{}'", len, m_filename));
+		Error::SetStringFmt(error, "ERROR ({}): Index could not be generated for file '{}'", len, m_filename);
		free_index(index);
-		InitZstates();
		return false;
	}

-	InitZstates();
	return true;
}

-bool GzippedFileReader::Open(std::string filename, Error* error)
+bool GzippedFileReader::Open2(std::string filename, Error* error)
{
	Close();

	m_filename = std::move(filename);
-	if (!(m_src = FileSystem::OpenCFile(m_filename.c_str(), "rb", error)) || !OkIndex(error))
+	if (!(m_src = FileSystem::OpenCFile(m_filename.c_str(), "rb", error)) || !LoadOrCreateIndex(error))
	{
		Close();
		return false;
	}

-	AsyncPrefetchOpen();
	return true;
};

-void GzippedFileReader::BeginRead(void* pBuffer, uint sector, uint count)
-{
-	// No a-sync support yet, implement as sync
-	mBytesRead = ReadSync(pBuffer, sector, count);
-}
-
-int GzippedFileReader::FinishRead()
+void GzippedFileReader::Close2()
{
-	int res = mBytesRead;
-	mBytesRead = -1;
-	return res;
-}
-
-void GzippedFileReader::CancelRead()
+	if (m_z_state.isValid)
	{
+		inflateEnd(&m_z_state.strm);
+		m_z_state = {};
	}

-int GzippedFileReader::ReadSync(void* pBuffer, uint sector, uint count)
-{
-	s64 offset = (s64)sector * m_blocksize + m_dataoffset;
-	int bytesToRead = count * m_blocksize;
-	int res = _ReadSync(pBuffer, offset, bytesToRead);
-	if (res < 0)
-		Console.Error("Error: iso-gzip read unsuccessful.");
-	return res;
-}
-
-// If we have a valid and adequate zstate for this span, use it, else, use the index
-s64 GzippedFileReader::GetOptimalExtractionStart(s64 offset)
-{
-	int span = m_pIndex->span;
-	Czstate& cstate = m_zstates[offset / span];
-	s64 stateOffset = cstate.state.isValid ? cstate.state.out_offset : 0;
-	if (stateOffset && stateOffset <= offset)
-		return stateOffset; // state is faster than indexed
-
-	// If span is not exact multiples of GZFILE_READ_CHUNK_SIZE (because it was configured badly),
-	// we fallback to always GZFILE_READ_CHUNK_SIZE boundaries
-	if (span % GZFILE_READ_CHUNK_SIZE)
-		return offset / GZFILE_READ_CHUNK_SIZE * GZFILE_READ_CHUNK_SIZE;
-
-	return span * (offset / span); // index direct access boundaries
-}
-
-int GzippedFileReader::_ReadSync(void* pBuffer, s64 offset, uint bytesToRead)
-{
-	if (!OkIndex(nullptr))
-		return -1;
-
-	if ((offset + bytesToRead) > m_pIndex->uncompressed_size)
-		return -1;
-
-	// Without all the caching, chunking and states, this would be enough:
-	// return extract(m_src, m_pIndex, offset, (unsigned char*)pBuffer, bytesToRead);
-
-	// Split request to GZFILE_READ_CHUNK_SIZE chunks at GZFILE_READ_CHUNK_SIZE boundaries
-	uint maxInChunk = GZFILE_READ_CHUNK_SIZE - offset % GZFILE_READ_CHUNK_SIZE;
-	if (bytesToRead > maxInChunk)
-	{
-		int first = _ReadSync(pBuffer, offset, maxInChunk);
-		if (first != (int)maxInChunk)
-			return first; // EOF or failure
-
-		int rest = _ReadSync((char*)pBuffer + maxInChunk, offset + maxInChunk, bytesToRead - maxInChunk);
-		if (rest < 0)
-			return rest;
-
-		return first + rest;
-	}
-
-	// From here onwards it's guaranteed that the request is inside a single GZFILE_READ_CHUNK_SIZE boundaries
-
-	int res = m_cache.Read(pBuffer, offset, bytesToRead);
-	if (res >= 0)
-		return res;
-
-	// Not available from cache. Decompress from optimal starting
-	// point in GZFILE_READ_CHUNK_SIZE chunks and cache each chunk.
-	Common::Timer start_time;
-	s64 extractOffset = GetOptimalExtractionStart(offset); // guaranteed in GZFILE_READ_CHUNK_SIZE boundaries
-	int size = offset + maxInChunk - extractOffset;
-	unsigned char* extracted = (unsigned char*)malloc(size);
-
-	int span = m_pIndex->span;
-	int spanix = extractOffset / span;
-	AsyncPrefetchCancel();
-	res = extract(m_src, m_pIndex, extractOffset, extracted, size, &(m_zstates[spanix].state));
-	if (res < 0)
-	{
-		free(extracted);
-		return res;
-	}
-	AsyncPrefetchChunk(getInOffset(&(m_zstates[spanix].state)));
-
-	int copied = ChunksCache::CopyAvailable(extracted, extractOffset, res, pBuffer, offset, bytesToRead);
-
-	if (m_zstates[spanix].state.isValid && (extractOffset + res) / span != offset / span)
-	{
-		// The state no longer matches this span.
-		// move the state to the appropriate span because it will be faster than using the index
-		int targetix = (extractOffset + res) / span;
-		m_zstates[targetix].Kill();
-		// We have elements for the entire file, and another one.
-		m_zstates[targetix].state.in_offset = m_zstates[spanix].state.in_offset;
-		m_zstates[targetix].state.isValid = m_zstates[spanix].state.isValid;
-		m_zstates[targetix].state.out_offset = m_zstates[spanix].state.out_offset;
-		inflateCopy(&m_zstates[targetix].state.strm, &m_zstates[spanix].state.strm);
-
-		m_zstates[spanix].Kill();
-	}
-
-	if (size <= GZFILE_READ_CHUNK_SIZE)
-		m_cache.Take(extracted, extractOffset, res, size);
-	else
-	{ // split into cacheable chunks
-		for (int i = 0; i < size; i += GZFILE_READ_CHUNK_SIZE)
-		{
-			int available = std::clamp(res - i, 0, GZFILE_READ_CHUNK_SIZE);
-			void* chunk = available ? malloc(available) : 0;
-			if (available)
-				memcpy(chunk, extracted + i, available);
-			m_cache.Take(chunk, extractOffset + i, available, std::min(size - i, GZFILE_READ_CHUNK_SIZE));
-		}
-		free(extracted);
-	}
-
-	if (const double duration = start_time.GetTimeMilliseconds(); duration > 10)
-	{
-		Console.WriteLn(Color_Gray, "gunzip: chunk #%5d-%2d : %1.2f MB - %d ms",
-			(int)(offset / 4 / 1024 / 1024),
-			(int)(offset % (4 * 1024 * 1024) / GZFILE_READ_CHUNK_SIZE),
-			(float)size / 1024 / 1024,
-			duration);
-	}
-
-	return copied;
-}
-
-void GzippedFileReader::Close()
-{
-	m_filename.clear();
-	if (m_pIndex)
-	{
-		free_index((Access*)m_pIndex);
-		m_pIndex = 0;
-	}
-
-	InitZstates(); // results in delete because no index
-	m_cache.Clear();
-
	if (m_src)
	{
-		fclose(m_src);
+		std::fclose(m_src);
		m_src = nullptr;
	}

-	AsyncPrefetchClose();
+	if (m_index)
+	{
+		free_index(m_index);
+		m_index = nullptr;
+	}
}

+ThreadedFileReader::Chunk GzippedFileReader::ChunkForOffset(u64 offset)
+{
+	ThreadedFileReader::Chunk chunk = {};
+	if (static_cast<s64>(offset) >= m_index->uncompressed_size)
+	{
+		chunk.chunkID = -1;
+	}
+	else
+	{
+		chunk.chunkID = static_cast<s64>(offset) / m_index->span;
+		chunk.length = static_cast<u32>(std::min<u64>(m_index->uncompressed_size - offset, m_index->span));
+		chunk.offset = static_cast<u64>(chunk.chunkID) * m_index->span;
+	}
+
+	return chunk;
+}
+
+int GzippedFileReader::ReadChunk(void* dst, s64 chunkID)
+{
+	if (chunkID < 0 || chunkID >= m_index->size)
+		return -1;
+
+	const s64 file_offset = chunkID * m_index->span;
+	const u32 read_len = static_cast<u32>(std::min<s64>(m_index->uncompressed_size - file_offset, m_index->span));
+	return extract(m_src, m_index, file_offset, static_cast<unsigned char*>(dst), read_len, &m_z_state);
+}
+
u32 GzippedFileReader::GetBlockCount() const
{
-	// type and formula copied from FlatFileReader
-	// FIXME? : Shouldn't it be uint and (size - m_dataoffset) / m_blocksize ?
-	return (int)((m_pIndex ? m_pIndex->uncompressed_size : 0) / m_blocksize);
-}
-
-void GzippedFileReader::SetBlockSize(u32 bytes)
-{
-	m_blocksize = bytes;
-}
-
-void GzippedFileReader::SetDataOffset(u32 bytes)
-{
-	m_dataoffset = bytes;
+	return (m_index->uncompressed_size + (m_blocksize - 1)) / m_blocksize;
}
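The new ChunkForOffset()/ReadChunk() pair maps each uncompressed byte offset onto a span-aligned chunk that ThreadedFileReader decompresses and buffers as a unit. A standalone sketch of that mapping with plain integer types, an invented ChunkInfoSketch struct instead of the real ThreadedFileReader::Chunk, and an assumed 4 MiB span and ISO size; here the chunk length is computed from the chunk start:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Illustrative stand-in for ThreadedFileReader::Chunk; field names follow the diff above.
struct ChunkInfoSketch
{
	int64_t chunkID;  // -1 when the offset is past the end of the uncompressed data
	uint64_t offset;  // uncompressed offset where this chunk starts
	uint32_t length;  // bytes of uncompressed data covered by this chunk
};

static ChunkInfoSketch ChunkForOffsetSketch(uint64_t offset, int64_t span, int64_t uncompressed_size)
{
	ChunkInfoSketch chunk = {};
	if (static_cast<int64_t>(offset) >= uncompressed_size)
	{
		chunk.chunkID = -1; // out of range, nothing to read
		return chunk;
	}
	chunk.chunkID = static_cast<int64_t>(offset) / span;
	chunk.offset = static_cast<uint64_t>(chunk.chunkID) * span;
	chunk.length = static_cast<uint32_t>(std::min<int64_t>(uncompressed_size - static_cast<int64_t>(chunk.offset), span));
	return chunk;
}

int main()
{
	constexpr int64_t span = 4 * 1024 * 1024;             // matches GZFILE_SPAN_DEFAULT above
	constexpr int64_t total = 700ll * 1024 * 1024 + 2048; // assumed uncompressed ISO size

	// An offset of 5 MiB falls into chunk #1, which starts at 4 MiB and covers a full span.
	const ChunkInfoSketch c = ChunkForOffsetSketch(5 * 1024 * 1024, span, total);
	assert(c.chunkID == 1 && c.offset == static_cast<uint64_t>(span) && c.length == span);

	// The final chunk is shorter: only the bytes past the last full span remain.
	const ChunkInfoSketch last = ChunkForOffsetSketch(total - 1, span, total);
	assert(last.length == static_cast<uint32_t>(total - static_cast<int64_t>(last.offset)));
	return 0;
}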
pcsx2/CDVD/GzippedFileReader.h
@@ -1,79 +1,39 @@
-// SPDX-FileCopyrightText: 2002-2023 PCSX2 Dev Team
+// SPDX-FileCopyrightText: 2002-2024 PCSX2 Dev Team
// SPDX-License-Identifier: LGPL-3.0+

#pragma once

-#include "AsyncFileReader.h"
-#include "ChunksCache.h"
+#include "CDVD/ThreadedFileReader.h"
#include "zlib_indexed.h"

-static constexpr int GZFILE_SPAN_DEFAULT = (1048576 * 4); /* distance between direct access points when creating a new index */
-static constexpr int GZFILE_READ_CHUNK_SIZE = (256 * 1024); /* zlib extraction chunks size (at 0-based boundaries) */
-static constexpr int GZFILE_CACHE_SIZE_MB = 200; /* cache size for extracted data. must be at least GZFILE_READ_CHUNK_SIZE (in MB)*/
-
-typedef struct zstate Zstate;
-
-class GzippedFileReader final : public AsyncFileReader
+class GzippedFileReader final : public ThreadedFileReader
{
	DeclareNoncopyableObject(GzippedFileReader);

public:
	GzippedFileReader();
-	~GzippedFileReader();;
+	~GzippedFileReader();

-	bool Open(std::string filename, Error* error) override;
+	bool Open2(std::string filename, Error* error) override;

-	int ReadSync(void* pBuffer, u32 sector, u32 count) override;
+	Chunk ChunkForOffset(u64 offset) override;
+	int ReadChunk(void* dst, s64 chunkID) override;

-	void BeginRead(void* pBuffer, u32 sector, u32 count) override;
-	int FinishRead() override;
-	void CancelRead() override;
-
-	void Close() override;
+	void Close2() override;

	u32 GetBlockCount() const override;

-	void SetBlockSize(u32 bytes) override;
-	void SetDataOffset(u32 bytes) override;
-
private:
-	class Czstate
-	{
-	public:
-		Czstate() { state.isValid = 0; };
-		~Czstate() { Kill(); };
-		void Kill()
-		{
-			if (state.isValid)
-				inflateEnd(&state.strm);
-			state.isValid = 0;
-		}
-		Zstate state;
-	};
-
-	bool OkIndex(Error* error); // Verifies that we have an index, or try to create one
-	s64 GetOptimalExtractionStart(s64 offset);
-	int _ReadSync(void* pBuffer, s64 offset, uint bytesToRead);
-	void InitZstates();
-
-	int mBytesRead = 0; // Temp sync read result when simulating async read
-	Access* m_pIndex = nullptr; // Quick access index
-	Czstate* m_zstates = nullptr;
-	FILE* m_src = nullptr;
-
-	ChunksCache m_cache;
-
-#ifdef _WIN32
-	// Used by async prefetch
-	HANDLE hOverlappedFile = INVALID_HANDLE_VALUE;
-	OVERLAPPED asyncOperationContext = {};
-	bool asyncInProgress = false;
-	char mDummyAsyncPrefetchTarget[GZFILE_READ_CHUNK_SIZE];
-#endif
-
-	void AsyncPrefetchReset();
-	void AsyncPrefetchOpen();
-	void AsyncPrefetchClose();
-	void AsyncPrefetchChunk(s64 dummy);
-	void AsyncPrefetchCancel();
+	static constexpr int GZFILE_SPAN_DEFAULT = (1048576 * 4); /* distance between direct access points when creating a new index */
+	static constexpr int GZFILE_READ_CHUNK_SIZE = (256 * 1024); /* zlib extraction chunks size (at 0-based boundaries) */
+	static constexpr int GZFILE_CACHE_SIZE_MB = 200; /* cache size for extracted data. must be at least GZFILE_READ_CHUNK_SIZE (in MB)*/
+
+	// Verifies that we have an index, or try to create one
+	bool LoadOrCreateIndex(Error* error);
+
+	Access* m_index = nullptr; // Quick access index
+
+	std::FILE* m_src = nullptr;
+
+	zstate m_z_state = {};
};
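The per-span Czstate array is gone; a single zstate from zlib_indexed.h now lives in the reader, and Close2() tears it down with inflateEnd(). A minimal RAII sketch of that lifetime using plain zlib; this guard class is my own illustration, not code from this commit:

#include <zlib.h>

// Owns one inflate stream and guarantees inflateEnd() runs exactly once,
// mirroring what Close2() does for m_z_state above.
class InflateStateGuardSketch
{
public:
	InflateStateGuardSketch()
	{
		// 32 + MAX_WBITS: auto-detect gzip or zlib headers.
		valid = (inflateInit2(&strm, 32 + MAX_WBITS) == Z_OK);
	}
	~InflateStateGuardSketch()
	{
		if (valid)
			inflateEnd(&strm);
	}
	InflateStateGuardSketch(const InflateStateGuardSketch&) = delete;
	InflateStateGuardSketch& operator=(const InflateStateGuardSketch&) = delete;

	z_stream strm = {};
	bool valid = false;
};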
pcsx2/CMakeLists.txt
@@ -216,7 +216,6 @@ set(pcsx2CDVDSources
	CDVD/IsoHasher.cpp
	CDVD/IsoReader.cpp
	CDVD/OutputIsoFile.cpp
-	CDVD/ChunksCache.cpp
	CDVD/CompressedFileReader.cpp
	CDVD/ChdFileReader.cpp
	CDVD/CsoFileReader.cpp
@@ -232,7 +231,6 @@ set(pcsx2CDVDHeaders
	CDVD/CDVD.h
	CDVD/CDVD_internal.h
	CDVD/CDVDdiscReader.h
-	CDVD/ChunksCache.h
	CDVD/CompressedFileReader.h
	CDVD/ChdFileReader.h
	CDVD/CsoFileReader.h
pcsx2/pcsx2.vcxproj
@@ -112,7 +112,6 @@
    <ClCompile Include="CDVD\CDVDdiscReader.cpp" />
    <ClCompile Include="CDVD\CDVDdiscThread.cpp" />
    <ClCompile Include="CDVD\ChdFileReader.cpp" />
-    <ClCompile Include="CDVD\ChunksCache.cpp" />
    <ClCompile Include="CDVD\CompressedFileReader.cpp" />
    <ClCompile Include="CDVD\CsoFileReader.cpp" />
    <ClCompile Include="CDVD\FlatFileReader.cpp" />
@@ -464,9 +463,7 @@
    <ClInclude Include="AsyncFileReader.h" />
    <ClInclude Include="CDVD\BlockdumpFileReader.h" />
    <ClInclude Include="CDVD\CDVDdiscReader.h" />
-    <ClInclude Include="CDVD\ChunksCache.h" />
    <ClInclude Include="CDVD\CompressedFileReader.h" />
-    <ClInclude Include="CDVD\CompressedFileReaderUtils.h" />
    <ClInclude Include="CDVD\CsoFileReader.h" />
    <ClInclude Include="CDVD\ChdFileReader.h" />
    <ClInclude Include="CDVD\FlatFileReader.h" />
pcsx2/pcsx2.vcxproj.filters
@@ -767,9 +767,6 @@
    <ClCompile Include="CDVD\GzippedFileReader.cpp">
      <Filter>System\ISO</Filter>
    </ClCompile>
-    <ClCompile Include="CDVD\ChunksCache.cpp">
-      <Filter>System\ISO</Filter>
-    </ClCompile>
    <ClCompile Include="IopGte.cpp">
      <Filter>System\Ps2\Iop</Filter>
    </ClCompile>
@@ -1682,12 +1679,6 @@
    <ClInclude Include="CDVD\GzippedFileReader.h">
      <Filter>System\ISO</Filter>
    </ClInclude>
-    <ClInclude Include="CDVD\ChunksCache.h">
-      <Filter>System\ISO</Filter>
-    </ClInclude>
-    <ClInclude Include="CDVD\CompressedFileReaderUtils.h">
-      <Filter>System\ISO</Filter>
-    </ClInclude>
    <ClInclude Include="x86\R5900_Profiler.h">
      <Filter>System\Include</Filter>
    </ClInclude>