discsystem: fix support for bins with variant sector sizes

zeromus 2013-08-08 19:26:37 +00:00
parent 6472d48c1b
commit 0cfdcc6f53
2 changed files with 108 additions and 45 deletions
BizHawk.Emulation/DiscSystem


@@ -71,7 +71,8 @@ namespace BizHawk.DiscSystem
blobPath = CueFileResolver[cue_file.Path];
int blob_sectorsize = Cue.BINSectorSizeForTrackType(cue_file.Tracks[0].TrackType);
int blob_length_aba, blob_leftover;
int blob_length_aba;
long blob_length_bytes;
IBlob cue_blob = null;
//try any way we can to acquire a file
@@ -99,7 +100,7 @@ namespace BizHawk.DiscSystem
Blobs.Add(blob);
blob_length_aba = (int)(blob.Length / blob_sectorsize);
blob_leftover = (int)(blob.Length - blob_length_aba * blob_sectorsize);
blob_length_bytes = blob.Length;
cue_blob = blob;
}
else if (cue_file.FileType == Cue.CueFileType.ECM)
@@ -113,7 +114,7 @@ namespace BizHawk.DiscSystem
blob.Parse(blobPath);
cue_blob = blob;
blob_length_aba = (int)(blob.Length / blob_sectorsize);
blob_leftover = (int)(blob.Length - blob_length_aba * blob_sectorsize);
blob_length_bytes = blob.Length;
}
else if (cue_file.FileType == Cue.CueFileType.Wave)
{
@@ -156,7 +157,7 @@ namespace BizHawk.DiscSystem
}
blob_length_aba = (int)(blob.Length / blob_sectorsize);
blob_leftover = (int)(blob.Length - blob_length_aba * blob_sectorsize);
blob_length_bytes = blob.Length;
cue_blob = blob;
}
else throw new Exception("Internal error - Unhandled cue blob type");
@@ -166,6 +167,9 @@ namespace BizHawk.DiscSystem
//start timekeeping for the blob. every time we hit an index, this will advance
int blob_timestamp = 0;
//because we can have different size sectors in a blob, we need to keep a file cursor within the blob
long blob_cursor = 0;
//the aba that this cue blob starts on
int blob_disc_aba_start = Sectors.Count;
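For example (illustrative numbers, not from the commit): in a blob whose first track holds 1000 sectors of 2352 bytes and whose second track uses 2048-byte sectors, the second track's first sector begins at byte 1000 * 2352 = 2,352,000, while the old timestamp-times-sector-size arithmetic would compute 1000 * 2048 = 2,048,000. Advancing blob_cursor by each sector's own size as it is consumed yields the right offset for any mixture of sector sizes.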
@@ -204,9 +208,7 @@ namespace BizHawk.DiscSystem
int blob_track_start = blob_timestamp;
//enforce a rule of our own: every track within the file must have the same sector size
//we do know that files can change between track types within a file, but we're not sure what to do if the sector size changes
if (Cue.BINSectorSizeForTrackType(cue_track.TrackType) != blob_sectorsize) throw new Cue.CueBrokenException("Found different sector sizes within a cue blob. We don't know how to handle that.");
//once upon a time we had a check here to prevent a single blob from containing variant sector sizes. but we support that now.
//check integrity of track sequence and setup data structures
//TODO - check for skipped tracks in cue parser instead
@@ -286,7 +288,8 @@ namespace BizHawk.DiscSystem
//so we just emit a Sector_Raw
Sector_RawBlob sector_rawblob = new Sector_RawBlob();
sector_rawblob.Blob = cue_blob;
sector_rawblob.Offset = (long)blob_timestamp * 2352;
sector_rawblob.Offset = blob_cursor;
blob_cursor += 2352;
Sector_Mode1_or_Mode2_2352 sector_raw;
if(cue_track.TrackType == ETrackType.Mode1_2352)
sector_raw = new Sector_Mode1_2352();
@@ -297,15 +300,7 @@ namespace BizHawk.DiscSystem
else throw new InvalidOperationException();
sector_raw.BaseSector = sector_rawblob;
//take care to handle final sectors that are too short.
if (is_last_aba_in_track && blob_leftover > 0)
{
Sector_ZeroPad sector_zeropad = new Sector_ZeroPad();
sector_zeropad.BaseSector = sector_rawblob;
sector_zeropad.BaseLength = 2352 - blob_leftover;
sector_raw.BaseSector = sector_zeropad;
Sectors.Add(new SectorEntry(sector_raw));
}
Sectors.Add(new SectorEntry(sector_raw));
break;
}
@@ -316,8 +311,8 @@ namespace BizHawk.DiscSystem
int curr_disc_aba = Sectors.Count;
var sector_2048 = new Sector_Mode1_2048(curr_disc_aba + 150);
sector_2048.Blob = new ECMCacheBlob(cue_blob);
sector_2048.Offset = (long)blob_timestamp * 2048;
if (blob_leftover > 0) throw new Cue.CueBrokenException("TODO - Incomplete 2048 byte/sector bin files (iso files) not yet supported.");
sector_2048.Offset = blob_cursor;
blob_cursor += 2048;
Sectors.Add(new SectorEntry(sector_2048));
break;
}
@@ -341,6 +336,13 @@ namespace BizHawk.DiscSystem
toc_track.length_aba = Sectors.Count - toc_track.Indexes[1].aba;
curr_track++;
//if we ran off the end of the blob, pad it with zeroes, I guess
if (blob_cursor > blob_length_bytes)
{
//mutate the blob to an encapsulating Blob_ZeroPadAdapter
Blobs[Blobs.Count - 1] = new Blob_ZeroPadAdapter(Blobs[Blobs.Count - 1], blob_length_bytes, blob_cursor - blob_length_bytes);
}
} //track loop
} //file loop
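
Taken together, the hunks above boil down to the following mechanism, shown as a minimal self-contained sketch rather than the actual DiscSystem code (TrackSpec, LayoutBlob and BuildOffsets are hypothetical names; only the cursor arithmetic and the end-of-blob padding decision mirror the commit):

// Minimal sketch (not DiscSystem code) of the cursor-and-pad idea implemented above.
using System.Collections.Generic;

class TrackSpec
{
    public int SectorSize;   // 2352 or 2048, chosen per track
    public int SectorCount;
}

static class LayoutBlob
{
    // Returns the starting byte offset of every sector, plus how many zero bytes the
    // blob would need appended if the cue describes more data than the file contains.
    public static List<long> BuildOffsets(IEnumerable<TrackSpec> tracks, long blobLengthBytes, out long padBytes)
    {
        var offsets = new List<long>();
        long cursor = 0;
        foreach (var t in tracks)
        {
            for (int s = 0; s < t.SectorCount; s++)
            {
                offsets.Add(cursor);      // each sector starts wherever the cursor currently sits
                cursor += t.SectorSize;   // advance by this track's own sector size
            }
        }
        // if we ran off the end of the blob, the caller should zero-pad the difference
        padBytes = cursor > blobLengthBytes ? cursor - blobLengthBytes : 0;
        return offsets;
    }
}

In the commit itself the offsets feed Sector_RawBlob / Sector_Mode1_2048 entries, and the padding is applied by swapping the last entry of Blobs for a Blob_ZeroPadAdapter.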


@@ -97,6 +97,65 @@ namespace BizHawk.DiscSystem
int Read(long byte_pos, byte[] buffer, int offset, int count);
}
public class Blob_ZeroPadAdapter : IBlob
{
public Blob_ZeroPadAdapter(IBlob baseBlob, long padFrom, long padLen)
{
this.baseBlob = baseBlob;
this.padFrom = padFrom;
this.padLen = padLen;
}
public int Read(long byte_pos, byte[] buffer, int offset, int count)
{
throw new NotImplementedException("Blob_ZeroPadAdapter hasn't been tested yet! please report this!");
//something about this seems unnecessarily complex, but... i dunno.
//figure out how much remains until the zero-padding begins
long remain = padFrom - byte_pos;
int todo;
if (remain < count)
todo = (int)remain;
else todo = count;
//read up until the zero-padding
int totalRead = 0;
int readed = baseBlob.Read(byte_pos, buffer, offset, todo);
totalRead += readed;
offset += todo;
//if we didn't read enough, we certainly shouldn't try to read any more
if (readed < todo)
return readed;
//if that was all we needed, then we're done
count -= todo;
if (count == 0)
return totalRead;
//if we need more, it must come from zero-padding
remain = padLen;
if (remain < count)
todo = (int)remain;
else todo = count;
Array.Clear(buffer, offset, todo);
totalRead += todo;
return totalRead;
}
public void Dispose()
{
baseBlob.Dispose();
}
IBlob baseBlob;
long padFrom;
long padLen;
}
class Blob_RawFile : IBlob
{
public string PhysicalPath {
@@ -176,31 +235,33 @@ namespace BizHawk.DiscSystem
}
}
/// <summary>
/// this ISector adapts another ISector by always returning zeroes
/// TODO I dont like the way this works. I think blobs should get adapted instead to zero-pad to a certain length.
/// </summary>
class Sector_ZeroPad : ISector
{
public ISector BaseSector;
public int BaseLength;
public int Read(byte[] buffer, int offset)
{
return _Read(buffer, offset, 2352);
}
public int Read_2048(byte[] buffer, int offset)
{
return _Read(buffer, offset, 2352);
}
int _Read(byte[] buffer, int offset, int amount)
{
int read = BaseSector.Read(buffer, offset);
if(read < BaseLength) return read;
for (int i = BaseLength; i < amount; i++)
buffer[offset + i] = 0;
return amount;
}
}
// ------ replaced by Blob_ZeroPadAdapter
///// <summary>
///// this ISector adapts another ISector by always returning zeroes
///// TODO I dont like the way this works. I think blobs should get adapted instead to zero-pad to a certain length.
///// </summary>
//class Sector_ZeroPad : ISector
//{
// public ISector BaseSector;
// public int BaseLength;
// public int Read(byte[] buffer, int offset)
// {
// return _Read(buffer, offset, 2352);
// }
// public int Read_2048(byte[] buffer, int offset)
// {
// return _Read(buffer, offset, 2352);
// }
// int _Read(byte[] buffer, int offset, int amount)
// {
// int read = BaseSector.Read(buffer, offset);
// if(read < BaseLength) return read;
// for (int i = BaseLength; i < amount; i++)
// buffer[offset + i] = 0;
// return amount;
// }
//}
abstract class Sector_Mode1_or_Mode2_2352 : ISector
{
@@ -297,7 +358,7 @@ namespace BizHawk.DiscSystem
public int Read_2048(byte[] buffer, int offset)
{
//this is easy. we only have 2048 bytes, and 2048 bytes wer erequested
//this is easy. we only have 2048 bytes, and 2048 bytes were requested
return Blob.BaseBlob.Read(Offset, buffer, offset, 2048);
}