From 73caa212c059e965697cdc5845449b140c8a10f7 Mon Sep 17 00:00:00 2001 From: Asnivor Date: Mon, 14 Jan 2019 14:30:25 +0000 Subject: [PATCH] EmuHawk: SharpCompress implementation for Mono --- .../BizHawk.Client.Common.csproj | 333 ++- .../SharpCompress/Archives/AbstractArchive.cs | 179 ++ .../Archives/AbstractWritableArchive.cs | 147 ++ .../SharpCompress/Archives/ArchiveFactory.cs | 153 ++ .../Archives/GZip/GZipArchive.cs | 188 ++ .../Archives/GZip/GZipArchiveEntry.cs | 34 + .../Archives/GZip/GZipWritableArchiveEntry.cs | 66 + .../SharpCompress/Archives/IArchive.cs | 49 + .../SharpCompress/Archives/IArchiveEntry.cs | 24 + .../Archives/IArchiveEntryExtensions.cs | 70 + .../Archives/IArchiveExtensions.cs | 26 + .../Archives/IArchiveExtractionListener.cs | 11 + .../Archives/IWritableArchive.cs | 15 + .../Archives/IWritableArchiveEntry.cs | 9 + .../Archives/IWritableArchiveExtensions.cs | 63 + .../Archives/Rar/FileInfoRarArchiveVolume.cs | 46 + .../Archives/Rar/FileInfoRarFilePart.cs | 28 + .../Archives/Rar/RarArchive.Extensions.cs | 23 + .../SharpCompress/Archives/Rar/RarArchive.cs | 148 ++ .../Archives/Rar/RarArchiveEntry.cs | 89 + .../Archives/Rar/RarArchiveEntryFactory.cs | 47 + .../Archives/Rar/RarArchiveVolumeFactory.cs | 147 ++ .../Archives/Rar/SeekableFilePart.cs | 33 + .../Archives/Rar/StreamRarArchiveVolume.cs | 27 + .../Archives/SevenZip/SevenZipArchive.cs | 226 ++ .../Archives/SevenZip/SevenZipArchiveEntry.cs | 28 + .../SharpCompress/Archives/Tar/TarArchive.cs | 206 ++ .../Archives/Tar/TarArchiveEntry.cs | 29 + .../Archives/Tar/TarWritableArchiveEntry.cs | 65 + .../SharpCompress/Archives/Zip/ZipArchive.cs | 214 ++ .../Archives/Zip/ZipArchiveEntry.cs | 30 + .../Archives/Zip/ZipWritableArchiveEntry.cs | 68 + .../SharpCompress/Buffers/ArrayPool.cs | 119 + .../SharpCompress/Buffers/DefaultArrayPool.cs | 144 ++ .../Buffers/DefaultArrayPoolBucket.cs | 111 + .../SharpCompress/Buffers/Utilities.cs | 38 + .../SharpCompress/Common/ArchiveEncoding.cs | 77 + 
.../SharpCompress/Common/ArchiveException.cs | 12 + .../Common/ArchiveExtractionEventArgs.cs | 14 + .../SharpCompress/Common/ArchiveType.cs | 11 + .../Common/CompressedBytesReadEventArgs.cs | 17 + .../SharpCompress/Common/CompressionType.cs | 19 + .../Common/CryptographicException.cs | 12 + .../SharpCompress/Common/Entry.cs | 91 + .../SharpCompress/Common/EntryStream.cs | 92 + .../Common/ExtractionException.cs | 17 + .../SharpCompress/Common/ExtractionMethods.cs | 98 + .../SharpCompress/Common/ExtractionOptions.cs | 34 + .../SharpCompress/Common/FilePart.cs | 20 + .../FilePartExtractionBeginEventArgs.cs | 22 + .../SharpCompress/Common/FlagUtility.cs | 108 + .../SharpCompress/Common/GZip/GZipEntry.cs | 50 + .../SharpCompress/Common/GZip/GZipFilePart.cs | 120 + .../SharpCompress/Common/GZip/GZipVolume.cs | 25 + .../SharpCompress/Common/IEntry.Extensions.cs | 51 + .../SharpCompress/Common/IEntry.cs | 22 + .../Common/IExtractionListener.cs | 8 + .../SharpCompress/Common/IVolume.cs | 12 + .../Common/IncompleteArchiveException.cs | 10 + .../Common/InvalidFormatException.cs | 17 + .../Common/MultiVolumeExtractionException.cs | 17 + .../MultipartStreamRequiredException.cs | 10 + .../SharpCompress/Common/OptionsBase.cs | 13 + .../Common/PasswordProtectedException.cs | 17 + .../Common/Rar/Headers/AVHeader.cs | 30 + .../Common/Rar/Headers/ArchiveCryptHeader.cs | 57 + .../Common/Rar/Headers/ArchiveHeader.cs | 81 + .../Common/Rar/Headers/CommentHeader.cs | 28 + .../Common/Rar/Headers/EndArchiveHeader.cs | 43 + .../Common/Rar/Headers/FileHeader.cs | 452 ++++ .../Common/Rar/Headers/FileNameDecoder.cs | 78 + .../SharpCompress/Common/Rar/Headers/Flags.cs | 149 ++ .../Common/Rar/Headers/IRarHeader.cs | 7 + .../Common/Rar/Headers/MarkHeader.cs | 96 + .../Common/Rar/Headers/NewSubHeaderType.cs | 55 + .../Common/Rar/Headers/ProtectHeader.cs | 28 + .../Common/Rar/Headers/RarHeader.cs | 130 ++ .../Common/Rar/Headers/RarHeaderFactory.cs | 192 ++ .../Common/Rar/Headers/SignHeader.cs | 26 + 
.../Common/Rar/RarCrcBinaryReader.cs | 50 + .../Common/Rar/RarCryptoBinaryReader.cs | 115 + .../Common/Rar/RarCryptoWrapper.cs | 99 + .../SharpCompress/Common/Rar/RarEntry.cs | 65 + .../SharpCompress/Common/Rar/RarFilePart.cs | 27 + .../SharpCompress/Common/Rar/RarRijndael.cs | 121 + .../SharpCompress/Common/Rar/RarVolume.cs | 112 + .../Common/ReaderExtractionEventArgs.cs | 17 + .../Common/SevenZip/ArchiveDatabase.cs | 182 ++ .../Common/SevenZip/ArchiveReader.cs | 1591 +++++++++++++ .../Common/SevenZip/CBindPair.cs | 8 + .../Common/SevenZip/CCoderInfo.cs | 10 + .../Common/SevenZip/CFileItem.cs | 36 + .../SharpCompress/Common/SevenZip/CFolder.cs | 188 ++ .../Common/SevenZip/CMethodId.cs | 57 + .../Common/SevenZip/CStreamSwitch.cs | 69 + .../Common/SevenZip/DataReader.cs | 186 ++ .../Common/SevenZip/SevenZipEntry.cs | 45 + .../Common/SevenZip/SevenZipFilePart.cs | 106 + .../Common/SevenZip/SevenZipVolume.cs | 14 + .../Common/Tar/Headers/EntryType.cs | 19 + .../Common/Tar/Headers/TarHeader.cs | 282 +++ .../SharpCompress/Common/Tar/TarEntry.cs | 67 + .../SharpCompress/Common/Tar/TarFilePart.cs | 37 + .../Common/Tar/TarHeaderFactory.cs | 64 + .../Common/Tar/TarReadOnlySubStream.cs | 105 + .../SharpCompress/Common/Tar/TarVolume.cs | 13 + .../SharpCompress/Common/Volume.cs | 51 + .../Common/Zip/Headers/DirectoryEndHeader.cs | 44 + .../Zip/Headers/DirectoryEntryHeader.cs | 98 + .../Common/Zip/Headers/HeaderFlags.cs | 17 + .../Common/Zip/Headers/IgnoreHeader.cs | 17 + .../Common/Zip/Headers/LocalEntryHeader.cs | 70 + .../Headers/LocalEntryHeaderExtraFactory.cs | 149 ++ .../Common/Zip/Headers/SplitHeader.cs | 18 + .../Zip/Headers/Zip64DirectoryEndHeader.cs | 49 + .../Headers/Zip64DirectoryEndLocatorHeader.cs | 25 + .../Common/Zip/Headers/ZipFileEntry.cs | 102 + .../Common/Zip/Headers/ZipHeader.cs | 19 + .../Common/Zip/Headers/ZipHeaderType.cs | 13 + .../Zip/PkwareTraditionalCryptoStream.cs | 108 + .../Zip/PkwareTraditionalEncryptionData.cs | 112 + 
.../Common/Zip/SeekableZipFilePart.cs | 42 + .../Common/Zip/SeekableZipHeaderFactory.cs | 109 + .../Common/Zip/StreamingZipFilePart.cs | 62 + .../Common/Zip/StreamingZipHeaderFactory.cs | 70 + .../Common/Zip/WinzipAesCryptoStream.cs | 184 ++ .../Common/Zip/WinzipAesEncryptionData.cs | 79 + .../Common/Zip/WinzipAesKeySize.cs | 9 + .../Common/Zip/ZipCompressionMethod.cs | 13 + .../SharpCompress/Common/Zip/ZipEntry.cs | 85 + .../SharpCompress/Common/Zip/ZipFilePart.cs | 187 ++ .../Common/Zip/ZipHeaderFactory.cs | 192 ++ .../SharpCompress/Common/Zip/ZipVolume.cs | 15 + .../SharpCompress/Compressors/ADC/ADCBase.cs | 220 ++ .../Compressors/ADC/ADCStream.cs | 173 ++ .../Compressors/BZip2/BZip2Constants.cs | 101 + .../Compressors/BZip2/BZip2Stream.cs | 111 + .../Compressors/BZip2/CBZip2InputStream.cs | 1094 +++++++++ .../Compressors/BZip2/CBZip2OutputStream.cs | 1964 ++++++++++++++++ .../SharpCompress/Compressors/BZip2/CRC.cs | 203 ++ .../Compressors/CompressionMode.cs | 8 + .../Compressors/Deflate/CRC32.cs | 293 +++ .../Compressors/Deflate/DeflateManager.cs | 1987 ++++++++++++++++ .../Compressors/Deflate/DeflateStream.cs | 366 +++ .../Compressors/Deflate/FlushType.cs | 44 + .../Compressors/Deflate/GZipStream.cs | 479 ++++ .../Compressors/Deflate/InfTree.cs | 576 +++++ .../Compressors/Deflate/Inflate.cs | 1989 +++++++++++++++++ .../SharpCompress/Compressors/Deflate/Tree.cs | 487 ++++ .../SharpCompress/Compressors/Deflate/Zlib.cs | 492 ++++ .../Compressors/Deflate/ZlibBaseStream.cs | 650 ++++++ .../Compressors/Deflate/ZlibCodec.cs | 746 +++++++ .../Compressors/Deflate/ZlibConstants.cs | 125 ++ .../Compressors/Deflate/ZlibStream.cs | 344 +++ .../Compressors/Deflate64/BlockType.cs | 13 + .../Compressors/Deflate64/Deflate64Stream.cs | 255 +++ .../Compressors/Deflate64/DeflateInput.cs | 43 + .../Deflate64/FastEncoderStatus.cs | 245 ++ .../Compressors/Deflate64/HuffmanTree.cs | 311 +++ .../Compressors/Deflate64/InflaterManaged.cs | 738 ++++++ 
.../Compressors/Deflate64/InflaterState.cs | 42 + .../Compressors/Deflate64/InputBuffer.cs | 202 ++ .../Compressors/Deflate64/Match.cs | 17 + .../Compressors/Deflate64/MatchState.cs | 13 + .../Compressors/Deflate64/OutputWindow.cs | 151 ++ .../Compressors/Filters/BCJ2Filter.cs | 221 ++ .../Compressors/Filters/BCJFilter.cs | 113 + .../Compressors/Filters/Filter.cs | 154 ++ .../Compressors/LZMA/AesDecoderStream.cs | 266 +++ .../Compressors/LZMA/Bcj2DecoderStream.cs | 283 +++ .../Compressors/LZMA/BitVector.cs | 101 + .../SharpCompress/Compressors/LZMA/CRC.cs | 89 + .../Compressors/LZMA/DecoderStream.cs | 182 ++ .../SharpCompress/Compressors/LZMA/ICoder.cs | 172 ++ .../Compressors/LZMA/LZ/LzBinTree.cs | 424 ++++ .../Compressors/LZMA/LZ/LzInWindow.cs | 183 ++ .../Compressors/LZMA/LZ/LzOutWindow.cs | 205 ++ .../Compressors/LZMA/LZipStream.cs | 203 ++ .../SharpCompress/Compressors/LZMA/Log.cs | 94 + .../Compressors/LZMA/LzmaBase.cs | 109 + .../Compressors/LZMA/LzmaDecoder.cs | 481 ++++ .../Compressors/LZMA/LzmaEncoder.cs | 1797 +++++++++++++++ .../Compressors/LZMA/LzmaEncoderProperties.cs | 55 + .../Compressors/LZMA/LzmaStream.cs | 318 +++ .../Compressors/LZMA/RangeCoder/RangeCoder.cs | 252 +++ .../LZMA/RangeCoder/RangeCoderBit.cs | 140 ++ .../LZMA/RangeCoder/RangeCoderBitTree.cs | 163 ++ .../Compressors/LZMA/Registry.cs | 58 + .../LZMA/Utilites/CrcBuilderStream.cs | 84 + .../LZMA/Utilites/CrcCheckStream.cs | 105 + .../LZMA/Utilites/IPasswordProvider.cs | 7 + .../Compressors/LZMA/Utilites/Utils.cs | 92 + .../Compressors/PPMd/H/FreqData.cs | 67 + .../Compressors/PPMd/H/ModelPPM.cs | 915 ++++++++ .../Compressors/PPMd/H/PPMContext.cs | 567 +++++ .../Compressors/PPMd/H/Pointer.cs | 25 + .../Compressors/PPMd/H/RangeCoder.cs | 155 ++ .../Compressors/PPMd/H/RarMemBlock.cs | 126 ++ .../Compressors/PPMd/H/RarNode.cs | 54 + .../Compressors/PPMd/H/SEE2Context.cs | 75 + .../SharpCompress/Compressors/PPMd/H/State.cs | 98 + .../Compressors/PPMd/H/StateRef.cs | 67 + 
.../Compressors/PPMd/H/SubAllocator.cs | 451 ++++ .../Compressors/PPMd/I1/Allocator.cs | 489 ++++ .../Compressors/PPMd/I1/Coder.cs | 104 + .../Compressors/PPMd/I1/MemoryNode.cs | 247 ++ .../Compressors/PPMd/I1/Model.cs | 923 ++++++++ .../PPMd/I1/ModelRestorationMethod.cs | 29 + .../Compressors/PPMd/I1/Pointer.cs | 365 +++ .../Compressors/PPMd/I1/PpmContext.cs | 815 +++++++ .../Compressors/PPMd/I1/PpmState.cs | 197 ++ .../Compressors/PPMd/I1/See2Context.cs | 55 + .../Compressors/PPMd/PpmdProperties.cs | 71 + .../Compressors/PPMd/PpmdStream.cs | 142 ++ .../Compressors/PPMd/PpmdVersion.cs | 9 + .../Compressors/Rar/IRarUnpack.cs | 18 + .../Rar/MultiVolumeReadOnlyStream.cs | 141 ++ .../SharpCompress/Compressors/Rar/RarCRC.cs | 48 + .../Compressors/Rar/RarCrcStream.cs | 45 + .../Compressors/Rar/RarStream.cs | 129 ++ .../Rar/UnpackV1/Decode/AudioVariables.cs | 26 + .../Rar/UnpackV1/Decode/BitDecode.cs | 10 + .../Rar/UnpackV1/Decode/CodeType.cs | 15 + .../Compressors/Rar/UnpackV1/Decode/Decode.cs | 34 + .../Rar/UnpackV1/Decode/DistDecode.cs | 10 + .../Rar/UnpackV1/Decode/FilterType.cs | 10 + .../Rar/UnpackV1/Decode/LitDecode.cs | 10 + .../Rar/UnpackV1/Decode/LowDistDecode.cs | 10 + .../Rar/UnpackV1/Decode/MultDecode.cs | 10 + .../Rar/UnpackV1/Decode/PackDef.cs | 48 + .../Rar/UnpackV1/Decode/RepDecode.cs | 10 + .../Rar/UnpackV1/PPM/BlockTypes.cs | 8 + .../Compressors/Rar/UnpackV1/Unpack.cs | 1270 +++++++++++ .../Compressors/Rar/UnpackV1/Unpack15.cs | 720 ++++++ .../Compressors/Rar/UnpackV1/Unpack20.cs | 524 +++++ .../Compressors/Rar/UnpackV1/Unpack50.cs | 841 +++++++ .../Compressors/Rar/UnpackV1/UnpackFilter.cs | 33 + .../Compressors/Rar/UnpackV1/UnpackInline.cs | 31 + .../Compressors/Rar/UnpackV1/UnpackUtility.cs | 220 ++ .../Rar/UnpackV2017/BitInput.getbits_cpp.cs | 67 + .../Rar/UnpackV2017/BitInput.getbits_hpp.cs | 70 + .../FragmentedWindow.unpack50frag_cpp.cs | 154 ++ .../Rar/UnpackV2017/PackDef.compress_hpp.cs | 56 + .../Compressors/Rar/UnpackV2017/Unpack.cs | 108 + 
.../Rar/UnpackV2017/Unpack.rawint_hpp.cs | 126 ++ .../Rar/UnpackV2017/Unpack.unpack15_cpp.cs | 501 +++++ .../Rar/UnpackV2017/Unpack.unpack20_cpp.cs | 399 ++++ .../Rar/UnpackV2017/Unpack.unpack30_cpp.cs | 800 +++++++ .../Rar/UnpackV2017/Unpack.unpack50_cpp.cs | 712 ++++++ .../Rar/UnpackV2017/Unpack.unpack_cpp.cs | 377 ++++ .../UnpackV2017/Unpack.unpackinline_cpp.cs | 173 ++ .../Compressors/Rar/UnpackV2017/notes.txt | 50 + .../Compressors/Rar/UnpackV2017/unpack_hpp.cs | 441 ++++ .../Compressors/Rar/VM/BitInput.cs | 86 + .../SharpCompress/Compressors/Rar/VM/RarVM.cs | 1452 ++++++++++++ .../Compressors/Rar/VM/VMCmdFlags.cs | 46 + .../Compressors/Rar/VM/VMCommands.cs | 78 + .../Compressors/Rar/VM/VMFlags.cs | 10 + .../Compressors/Rar/VM/VMOpType.cs | 10 + .../Compressors/Rar/VM/VMPreparedCommand.cs | 17 + .../Compressors/Rar/VM/VMPreparedOperand.cs | 10 + .../Compressors/Rar/VM/VMPreparedProgram.cs | 21 + .../Rar/VM/VMStandardFilterSignature.cs | 18 + .../Compressors/Rar/VM/VMStandardFilters.cs | 14 + .../Compressors/Xz/BinaryUtils.cs | 54 + .../SharpCompress/Compressors/Xz/CheckType.cs | 10 + .../SharpCompress/Compressors/Xz/Crc32.cs | 60 + .../SharpCompress/Compressors/Xz/Crc64.cs | 57 + .../Compressors/Xz/Filters/BlockFilter.cs | 51 + .../Compressors/Xz/Filters/Lzma2Filter.cs | 60 + .../Compressors/Xz/MultiByteIntegers.cs | 32 + .../Compressors/Xz/ReadOnlyStream.cs | 44 + .../SharpCompress/Compressors/Xz/XZBlock.cs | 165 ++ .../SharpCompress/Compressors/Xz/XZFooter.cs | 49 + .../SharpCompress/Compressors/Xz/XZHeader.cs | 55 + .../SharpCompress/Compressors/Xz/XZIndex.cs | 73 + .../Xz/XZIndexMarkerReachedException.cs | 8 + .../Compressors/Xz/XZIndexRecord.cs | 22 + .../Compressors/Xz/XZReadOnlyStream.cs | 14 + .../SharpCompress/Compressors/Xz/XZStream.cs | 116 + .../SharpCompress/Converters/DataConverter.cs | 1405 ++++++++++++ .../SharpCompress/Crypto/Crc32Stream.cs | 119 + .../SharpCompress/Crypto/CryptoException.cs | 25 + .../Crypto/DataLengthException.cs | 35 + 
.../SharpCompress/Crypto/IBlockCipher.cs | 34 + .../SharpCompress/Crypto/ICipherParameters.cs | 6 + .../SharpCompress/Crypto/KeyParameter.cs | 48 + .../SharpCompress/Crypto/RijndaelEngine.cs | 731 ++++++ .../SharpCompress/EnumExtensions.cs | 18 + .../SharpCompress/IO/BufferedSubStream.cs | 83 + .../IO/CountingWritableSubStream.cs | 56 + .../SharpCompress/IO/ListeningStream.cs | 79 + .../SharpCompress/IO/MarkingBinaryReader.cs | 206 ++ .../SharpCompress/IO/NonDisposingStream.cs | 61 + .../SharpCompress/IO/ReadOnlySubStream.cs | 83 + .../SharpCompress/IO/RewindableStream.cs | 164 ++ .../SharpCompress/IO/StreamingMode.cs | 8 + BizHawk.Client.Common/SharpCompress/Lazy.cs | 29 + .../SharpCompress/LazyReadOnlyCollection.cs | 151 ++ .../SharpCompress/ReadOnlyCollection.cs | 55 + .../SharpCompress/Readers/AbstractReader.cs | 232 ++ .../SharpCompress/Readers/GZip/GZipReader.cs | 39 + .../SharpCompress/Readers/IReader.cs | 39 + .../Readers/IReaderExtensions.cs | 66 + .../Readers/IReaderExtractionListener.cs | 9 + .../Readers/Rar/MultiVolumeRarReader.cs | 119 + .../Readers/Rar/NonSeekableStreamFilePart.cs | 21 + .../SharpCompress/Readers/Rar/RarReader.cs | 77 + .../Readers/Rar/RarReaderEntry.cs | 34 + .../Readers/Rar/RarReaderVolume.cs | 26 + .../Readers/Rar/SingleVolumeRarReader.cs | 30 + .../SharpCompress/Readers/ReaderFactory.cs | 107 + .../SharpCompress/Readers/ReaderOptions.cs | 14 + .../SharpCompress/Readers/ReaderProgress.cs | 24 + .../SharpCompress/Readers/Tar/TarReader.cs | 124 + .../SharpCompress/Readers/Zip/ZipReader.cs | 61 + .../SharpCompress/Utility.cs | 443 ++++ .../SharpCompress/Writers/AbstractWriter.cs | 57 + .../SharpCompress/Writers/GZip/GZipWriter.cs | 49 + .../Writers/GZip/GZipWriterOptions.cs | 28 + .../SharpCompress/Writers/IWriter.cs | 12 + .../Writers/IWriterExtensions.cs | 64 + .../SharpCompress/Writers/Tar/TarWriter.cs | 130 ++ .../Writers/Tar/TarWriterOptions.cs | 23 + .../SharpCompress/Writers/WriterFactory.cs | 39 + 
.../SharpCompress/Writers/WriterOptions.cs | 18 + .../Writers/Zip/ZipCentralDirectoryEntry.cs | 106 + .../SharpCompress/Writers/Zip/ZipWriter.cs | 484 ++++ .../Writers/Zip/ZipWriterEntryOptions.cs | 27 + .../Writers/Zip/ZipWriterOptions.cs | 44 + .../SharpCompressArchiveHandler.cs | 100 + BizHawk.Client.EmuHawk/Program.cs | 5 +- 332 files changed, 55136 insertions(+), 2 deletions(-) create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/AbstractArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/AbstractWritableArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/ArchiveFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipWritableArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntryExtensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtractionListener.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IWritableArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveExtensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarArchiveVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.Extensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntryFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveVolumeFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/SeekableFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Rar/StreamRarArchiveVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Tar/TarWritableArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchive.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipWritableArchiveEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Buffers/ArrayPool.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPool.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPoolBucket.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Buffers/Utilities.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ArchiveEncoding.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ArchiveException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ArchiveExtractionEventArgs.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ArchiveType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/CompressedBytesReadEventArgs.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/CompressionType.cs create mode 
100644 BizHawk.Client.Common/SharpCompress/Common/CryptographicException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Entry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/EntryStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ExtractionException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ExtractionMethods.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ExtractionOptions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/FilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/FilePartExtractionBeginEventArgs.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/FlagUtility.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/GZip/GZipEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/GZip/GZipFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/GZip/GZipVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/IEntry.Extensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/IEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/IExtractionListener.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/IVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/IncompleteArchiveException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/InvalidFormatException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/MultiVolumeExtractionException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/MultipartStreamRequiredException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/OptionsBase.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/PasswordProtectedException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/AVHeader.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/ArchiveCryptHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/ArchiveHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/CommentHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/EndArchiveHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileNameDecoder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/Flags.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/IRarHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/MarkHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/NewSubHeaderType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/ProtectHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/SignHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/RarCrcBinaryReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoBinaryReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoWrapper.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/RarEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/RarFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/RarRijndael.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Rar/RarVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/ReaderExtractionEventArgs.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveDatabase.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/CBindPair.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/CCoderInfo.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFileItem.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFolder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/CMethodId.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/CStreamSwitch.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/DataReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Tar/Headers/EntryType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Tar/Headers/TarHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Tar/TarEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Tar/TarFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Tar/TarHeaderFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Tar/TarReadOnlySubStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Tar/TarVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Volume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/DirectoryEndHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/HeaderFlags.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeaderExtraFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/SplitHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndLocatorHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipFileEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeaderType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalCryptoStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipFilePart.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesCryptoStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesEncryptionData.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesKeySize.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/ZipCompressionMethod.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/ZipEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/ZipFilePart.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Common/Zip/ZipHeaderFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Common/Zip/ZipVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCBase.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Constants.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Stream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2InputStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2OutputStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CRC.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/CompressionMode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/CRC32.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/DeflateManager.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/DeflateStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/FlushType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/GZipStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/InfTree.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/Inflate.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/Tree.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/Zlib.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibBaseStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibCodec.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibConstants.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibStream.cs create 
mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/BlockType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/DeflateInput.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/FastEncoderStatus.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/HuffmanTree.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/InflaterManaged.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/InflaterState.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/InputBuffer.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Match.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/MatchState.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/OutputWindow.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Filters/BCJ2Filter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Filters/BCJFilter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Filters/Filter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/AesDecoderStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Bcj2DecoderStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/BitVector.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/CRC.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/DecoderStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/ICoder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzBinTree.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzInWindow.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzOutWindow.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZipStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Log.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaBase.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaDecoder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoderProperties.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBit.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBitTree.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Registry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcBuilderStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcCheckStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/IPasswordProvider.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/Utils.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/FreqData.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/ModelPPM.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/PPMContext.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/Pointer.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RangeCoder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RarMemBlock.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RarNode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/SEE2Context.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/State.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/StateRef.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/SubAllocator.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Allocator.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Coder.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/MemoryNode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Model.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/ModelRestorationMethod.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Pointer.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmContext.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmState.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/See2Context.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdProperties.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdVersion.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/IRarUnpack.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/MultiVolumeReadOnlyStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCRC.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCrcStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarStream.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/AudioVariables.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/BitDecode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/CodeType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/Decode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/DistDecode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/FilterType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LitDecode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LowDistDecode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/MultDecode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/PackDef.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/RepDecode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/PPM/BlockTypes.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack15.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack20.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackFilter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackInline.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackUtility.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_cpp.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_hpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/FragmentedWindow.unpack50frag_cpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/PackDef.compress_hpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.rawint_hpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack15_cpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack50_cpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpackinline_cpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/notes.txt create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/BitInput.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/RarVM.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCmdFlags.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCommands.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMFlags.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMOpType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedCommand.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedOperand.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedProgram.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilterSignature.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilters.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/BinaryUtils.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/CheckType.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/Crc32.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/Crc64.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/Filters/BlockFilter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/Filters/Lzma2Filter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/MultiByteIntegers.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/ReadOnlyStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZBlock.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZFooter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZHeader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZIndex.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZIndexMarkerReachedException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZIndexRecord.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZReadOnlyStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Compressors/Xz/XZStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Converters/DataConverter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Crypto/Crc32Stream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Crypto/CryptoException.cs create 
mode 100644 BizHawk.Client.Common/SharpCompress/Crypto/DataLengthException.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Crypto/IBlockCipher.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Crypto/ICipherParameters.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Crypto/KeyParameter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Crypto/RijndaelEngine.cs create mode 100644 BizHawk.Client.Common/SharpCompress/EnumExtensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/BufferedSubStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/CountingWritableSubStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/ListeningStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/MarkingBinaryReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/NonDisposingStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/ReadOnlySubStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/RewindableStream.cs create mode 100644 BizHawk.Client.Common/SharpCompress/IO/StreamingMode.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Lazy.cs create mode 100644 BizHawk.Client.Common/SharpCompress/LazyReadOnlyCollection.cs create mode 100644 BizHawk.Client.Common/SharpCompress/ReadOnlyCollection.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/AbstractReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/GZip/GZipReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/IReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/IReaderExtensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/IReaderExtractionListener.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/Rar/MultiVolumeRarReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/Rar/NonSeekableStreamFilePart.cs create mode 100644 
BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderVolume.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/Rar/SingleVolumeRarReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/ReaderFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/ReaderOptions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/ReaderProgress.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/Tar/TarReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Readers/Zip/ZipReader.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Utility.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/AbstractWriter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/GZip/GZipWriter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/GZip/GZipWriterOptions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/IWriter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/IWriterExtensions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/Tar/TarWriter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/Tar/TarWriterOptions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/WriterFactory.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/WriterOptions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipCentralDirectoryEntry.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriter.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterEntryOptions.cs create mode 100644 BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterOptions.cs create mode 100644 BizHawk.Client.Common/SharpCompressArchiveHandler.cs diff --git 
a/BizHawk.Client.Common/BizHawk.Client.Common.csproj b/BizHawk.Client.Common/BizHawk.Client.Common.csproj index 9816bf2bfb..0b878042f5 100644 --- a/BizHawk.Client.Common/BizHawk.Client.Common.csproj +++ b/BizHawk.Client.Common/BizHawk.Client.Common.csproj @@ -255,8 +255,337 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -318,7 +647,9 @@ BizHawk.Bizware.BizwareGL - + + + diff --git a/BizHawk.Client.Common/SharpCompress/Archives/AbstractArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/AbstractArchive.cs new file mode 100644 index 0000000000..2981f734c8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/AbstractArchive.cs @@ -0,0 +1,179 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Readers; + +namespace SharpCompress.Archives +{ + public abstract class AbstractArchive : IArchive, IArchiveExtractionListener + where TEntry : IArchiveEntry + where TVolume : IVolume + { + private readonly LazyReadOnlyCollection lazyVolumes; + private readonly LazyReadOnlyCollection lazyEntries; + + public event EventHandler> EntryExtractionBegin; + public event EventHandler> EntryExtractionEnd; + + public event EventHandler CompressedBytesRead; + public event EventHandler FilePartExtractionBegin; + + protected ReaderOptions ReaderOptions { get; } + + private bool 
disposed; + +#if !NO_FILE + internal AbstractArchive(ArchiveType type, FileInfo fileInfo, ReaderOptions readerOptions) + { + Type = type; + if (!fileInfo.Exists) + { + throw new ArgumentException("File does not exist: " + fileInfo.FullName); + } + ReaderOptions = readerOptions; + readerOptions.LeaveStreamOpen = false; + lazyVolumes = new LazyReadOnlyCollection(LoadVolumes(fileInfo)); + lazyEntries = new LazyReadOnlyCollection(LoadEntries(Volumes)); + } + + + protected abstract IEnumerable LoadVolumes(FileInfo file); +#endif + + internal AbstractArchive(ArchiveType type, IEnumerable streams, ReaderOptions readerOptions) + { + Type = type; + ReaderOptions = readerOptions; + lazyVolumes = new LazyReadOnlyCollection(LoadVolumes(streams.Select(CheckStreams))); + lazyEntries = new LazyReadOnlyCollection(LoadEntries(Volumes)); + } + + internal AbstractArchive(ArchiveType type) + { + Type = type; + lazyVolumes = new LazyReadOnlyCollection(Enumerable.Empty()); + lazyEntries = new LazyReadOnlyCollection(Enumerable.Empty()); + } + + public ArchiveType Type { get; } + + void IArchiveExtractionListener.FireEntryExtractionBegin(IArchiveEntry entry) + { + EntryExtractionBegin?.Invoke(this, new ArchiveExtractionEventArgs(entry)); + } + + void IArchiveExtractionListener.FireEntryExtractionEnd(IArchiveEntry entry) + { + EntryExtractionEnd?.Invoke(this, new ArchiveExtractionEventArgs(entry)); + } + + private static Stream CheckStreams(Stream stream) + { + if (!stream.CanSeek || !stream.CanRead) + { + throw new ArgumentException("Archive streams must be Readable and Seekable"); + } + return stream; + } + + /// + /// Returns an ReadOnlyCollection of all the RarArchiveEntries across the one or many parts of the RarArchive. + /// + public virtual ICollection Entries { get { return lazyEntries; } } + + /// + /// Returns an ReadOnlyCollection of all the RarArchiveVolumes across the one or many parts of the RarArchive. 
+ /// + public ICollection Volumes { get { return lazyVolumes; } } + + /// + /// The total size of the files compressed in the archive. + /// + public virtual long TotalSize { get { return Entries.Aggregate(0L, (total, cf) => total + cf.CompressedSize); } } + + /// + /// The total size of the files as uncompressed in the archive. + /// + public virtual long TotalUncompressSize { get { return Entries.Aggregate(0L, (total, cf) => total + cf.Size); } } + + protected abstract IEnumerable LoadVolumes(IEnumerable streams); + protected abstract IEnumerable LoadEntries(IEnumerable volumes); + + IEnumerable IArchive.Entries { get { return Entries.Cast(); } } + + IEnumerable IArchive.Volumes { get { return lazyVolumes.Cast(); } } + + public virtual void Dispose() + { + if (!disposed) + { + lazyVolumes.ForEach(v => v.Dispose()); + lazyEntries.GetLoaded().Cast().ForEach(x => x.Close()); + disposed = true; + } + } + + void IArchiveExtractionListener.EnsureEntriesLoaded() + { + lazyEntries.EnsureFullyLoaded(); + lazyVolumes.EnsureFullyLoaded(); + } + + void IExtractionListener.FireCompressedBytesRead(long currentPartCompressedBytes, long compressedReadBytes) + { + CompressedBytesRead?.Invoke(this, new CompressedBytesReadEventArgs + { + CurrentFilePartCompressedBytesRead = currentPartCompressedBytes, + CompressedBytesRead = compressedReadBytes + }); + } + + void IExtractionListener.FireFilePartExtractionBegin(string name, long size, long compressedSize) + { + FilePartExtractionBegin?.Invoke(this, new FilePartExtractionBeginEventArgs + { + CompressedSize = compressedSize, + Size = size, + Name = name + }); + } + + /// + /// Use this method to extract all entries in an archive in order. + /// This is primarily for SOLID Rar Archives or 7Zip Archives as they need to be + /// extracted sequentially for the best performance. + /// + /// This method will load all entry information from the archive. + /// + /// WARNING: this will reuse the underlying stream for the archive. 
Errors may + /// occur if this is used at the same time as other extraction methods on this instance. + /// + /// + public IReader ExtractAllEntries() + { + ((IArchiveExtractionListener)this).EnsureEntriesLoaded(); + return CreateReaderForSolidExtraction(); + } + + protected abstract IReader CreateReaderForSolidExtraction(); + + /// + /// Archive is SOLID (this means the Archive saved bytes by reusing information which helps for archives containing many small files). + /// + public virtual bool IsSolid { get { return false; } } + + /// + /// The archive can find all the parts of the archive needed to fully extract the archive. This forces the parsing of the entire archive. + /// + public bool IsComplete + { + get + { + ((IArchiveExtractionListener)this).EnsureEntriesLoaded(); + return Entries.All(x => x.IsComplete); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/AbstractWritableArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/AbstractWritableArchive.cs new file mode 100644 index 0000000000..5f38d459ee --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/AbstractWritableArchive.cs @@ -0,0 +1,147 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Readers; +using SharpCompress.Writers; + +namespace SharpCompress.Archives +{ + public abstract class AbstractWritableArchive : AbstractArchive, IWritableArchive + where TEntry : IArchiveEntry + where TVolume : IVolume + { + private readonly List newEntries = new List(); + private readonly List removedEntries = new List(); + + private readonly List modifiedEntries = new List(); + private bool hasModifications; + + internal AbstractWritableArchive(ArchiveType type) + : base(type) + { + } + + internal AbstractWritableArchive(ArchiveType type, Stream stream, ReaderOptions readerFactoryOptions) + : base(type, stream.AsEnumerable(), readerFactoryOptions) + { + } + 
+#if !NO_FILE + internal AbstractWritableArchive(ArchiveType type, FileInfo fileInfo, ReaderOptions readerFactoryOptions) + : base(type, fileInfo, readerFactoryOptions) + { + } +#endif + + public override ICollection Entries + { + get + { + if (hasModifications) + { + return modifiedEntries; + } + return base.Entries; + } + } + + private void RebuildModifiedCollection() + { + hasModifications = true; + newEntries.RemoveAll(v => removedEntries.Contains(v)); + modifiedEntries.Clear(); + modifiedEntries.AddRange(OldEntries.Concat(newEntries)); + } + + private IEnumerable OldEntries { get { return base.Entries.Where(x => !removedEntries.Contains(x)); } } + + public void RemoveEntry(TEntry entry) + { + if (!removedEntries.Contains(entry)) + { + removedEntries.Add(entry); + RebuildModifiedCollection(); + } + } + + void IWritableArchive.RemoveEntry(IArchiveEntry entry) + { + RemoveEntry((TEntry)entry); + } + + public TEntry AddEntry(string key, Stream source, + long size = 0, DateTime? modified = null) + { + return AddEntry(key, source, false, size, modified); + } + + IArchiveEntry IWritableArchive.AddEntry(string key, Stream source, bool closeStream, long size, DateTime? modified) + { + return AddEntry(key, source, closeStream, size, modified); + } + + public TEntry AddEntry(string key, Stream source, bool closeStream, + long size = 0, DateTime? 
modified = null) + { + if (key.StartsWith("/") + || key.StartsWith("\\")) + { + key = key.Substring(1); + } + if (DoesKeyMatchExisting(key)) + { + throw new ArchiveException("Cannot add entry with duplicate key: " + key); + } + var entry = CreateEntry(key, source, size, modified, closeStream); + newEntries.Add(entry); + RebuildModifiedCollection(); + return entry; + } + + private bool DoesKeyMatchExisting(string key) + { + foreach (var path in Entries.Select(x => x.Key)) + { + var p = path.Replace('/', '\\'); + if (p.StartsWith("\\")) + { + p = p.Substring(1); + } + return string.Equals(p, key, StringComparison.OrdinalIgnoreCase); + } + return false; + } + + public void SaveTo(Stream stream, WriterOptions options) + { + //reset streams of new entries + newEntries.Cast().ForEach(x => x.Stream.Seek(0, SeekOrigin.Begin)); + SaveTo(stream, options, OldEntries, newEntries); + } + + protected TEntry CreateEntry(string key, Stream source, long size, DateTime? modified, + bool closeStream) + { + if (!source.CanRead || !source.CanSeek) + { + throw new ArgumentException("Streams must be readable and seekable to use the Writing Archive API"); + } + return CreateEntryInternal(key, source, size, modified, closeStream); + } + + protected abstract TEntry CreateEntryInternal(string key, Stream source, long size, DateTime? 
modified, + bool closeStream); + + protected abstract void SaveTo(Stream stream, WriterOptions options, IEnumerable oldEntries, IEnumerable newEntries); + + public override void Dispose() + { + base.Dispose(); + newEntries.Cast().ForEach(x => x.Close()); + removedEntries.Cast().ForEach(x => x.Close()); + modifiedEntries.Cast().ForEach(x => x.Close()); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/ArchiveFactory.cs b/BizHawk.Client.Common/SharpCompress/Archives/ArchiveFactory.cs new file mode 100644 index 0000000000..41435e2254 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/ArchiveFactory.cs @@ -0,0 +1,153 @@ +using System; +using System.IO; +using SharpCompress.Archives.GZip; +using SharpCompress.Archives.Rar; +using SharpCompress.Archives.SevenZip; +using SharpCompress.Archives.Tar; +using SharpCompress.Archives.Zip; +using SharpCompress.Common; +using SharpCompress.Compressors.LZMA; +using SharpCompress.Readers; + +namespace SharpCompress.Archives +{ + public class ArchiveFactory + { + /// + /// Opens an Archive for random access + /// + /// + /// + /// + public static IArchive Open(Stream stream, ReaderOptions readerOptions = null) + { + stream.CheckNotNull("stream"); + if (!stream.CanRead || !stream.CanSeek) + { + throw new ArgumentException("Stream should be readable and seekable"); + } + readerOptions = readerOptions ?? 
new ReaderOptions(); + if (ZipArchive.IsZipFile(stream, null)) + { + stream.Seek(0, SeekOrigin.Begin); + return ZipArchive.Open(stream, readerOptions); + } + stream.Seek(0, SeekOrigin.Begin); + if (SevenZipArchive.IsSevenZipFile(stream)) + { + stream.Seek(0, SeekOrigin.Begin); + return SevenZipArchive.Open(stream, readerOptions); + } + stream.Seek(0, SeekOrigin.Begin); + if (GZipArchive.IsGZipFile(stream)) + { + stream.Seek(0, SeekOrigin.Begin); + return GZipArchive.Open(stream, readerOptions); + } + stream.Seek(0, SeekOrigin.Begin); + if (RarArchive.IsRarFile(stream, readerOptions)) + { + stream.Seek(0, SeekOrigin.Begin); + return RarArchive.Open(stream, readerOptions); + } + stream.Seek(0, SeekOrigin.Begin); + if (TarArchive.IsTarFile(stream)) + { + stream.Seek(0, SeekOrigin.Begin); + return TarArchive.Open(stream, readerOptions); + } + throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip, LZip"); + } + + public static IWritableArchive Create(ArchiveType type) + { + switch (type) + { + case ArchiveType.Zip: + { + return ZipArchive.Create(); + } + case ArchiveType.Tar: + { + return TarArchive.Create(); + } + case ArchiveType.GZip: + { + return GZipArchive.Create(); + } + default: + { + throw new NotSupportedException("Cannot create Archives of type: " + type); + } + } + } + +#if !NO_FILE + + /// + /// Constructor expects a filepath to an existing file. + /// + /// + /// + public static IArchive Open(string filePath, ReaderOptions options = null) + { + filePath.CheckNotNullOrEmpty("filePath"); + return Open(new FileInfo(filePath), options); + } + + /// + /// Constructor with a FileInfo object to an existing file. + /// + /// + /// + public static IArchive Open(FileInfo fileInfo, ReaderOptions options = null) + { + fileInfo.CheckNotNull("fileInfo"); + options = options ?? 
new ReaderOptions { LeaveStreamOpen = false }; + using (var stream = fileInfo.OpenRead()) + { + if (ZipArchive.IsZipFile(stream, null)) + { + return ZipArchive.Open(fileInfo, options); + } + stream.Seek(0, SeekOrigin.Begin); + if (SevenZipArchive.IsSevenZipFile(stream)) + { + return SevenZipArchive.Open(fileInfo, options); + } + stream.Seek(0, SeekOrigin.Begin); + if (GZipArchive.IsGZipFile(stream)) + { + return GZipArchive.Open(fileInfo, options); + } + stream.Seek(0, SeekOrigin.Begin); + if (RarArchive.IsRarFile(stream, options)) + { + return RarArchive.Open(fileInfo, options); + } + stream.Seek(0, SeekOrigin.Begin); + if (TarArchive.IsTarFile(stream)) + { + return TarArchive.Open(fileInfo, options); + } + throw new InvalidOperationException("Cannot determine compressed stream type. Supported Archive Formats: Zip, GZip, Tar, Rar, 7Zip"); + } + } + + /// + /// Extract to specific directory, retaining filename + /// + public static void WriteToDirectory(string sourceArchive, string destinationDirectory, + ExtractionOptions options = null) + { + using (IArchive archive = Open(sourceArchive)) + { + foreach (IArchiveEntry entry in archive.Entries) + { + entry.WriteToDirectory(destinationDirectory, options); + } + } + } +#endif + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchive.cs new file mode 100644 index 0000000000..1ae40c5cc0 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchive.cs @@ -0,0 +1,188 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.GZip; +using SharpCompress.Readers; +using SharpCompress.Readers.GZip; +using SharpCompress.Writers; +using SharpCompress.Writers.GZip; + +namespace SharpCompress.Archives.GZip +{ + public class GZipArchive : AbstractWritableArchive + { +#if !NO_FILE + + /// + /// Constructor 
expects a filepath to an existing file. + /// + /// + /// + public static GZipArchive Open(string filePath, ReaderOptions readerOptions = null) + { + filePath.CheckNotNullOrEmpty("filePath"); + return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions()); + } + + /// + /// Constructor with a FileInfo object to an existing file. + /// + /// + /// + public static GZipArchive Open(FileInfo fileInfo, ReaderOptions readerOptions = null) + { + fileInfo.CheckNotNull("fileInfo"); + return new GZipArchive(fileInfo, readerOptions ?? new ReaderOptions()); + } +#endif + + /// + /// Takes a seekable Stream as a source + /// + /// + /// + public static GZipArchive Open(Stream stream, ReaderOptions readerOptions = null) + { + stream.CheckNotNull("stream"); + return new GZipArchive(stream, readerOptions ?? new ReaderOptions()); + } + + public static GZipArchive Create() + { + return new GZipArchive(); + } + +#if !NO_FILE + + /// + /// Constructor with a FileInfo object to an existing file. 
+ /// + /// + /// + internal GZipArchive(FileInfo fileInfo, ReaderOptions options) + : base(ArchiveType.GZip, fileInfo, options) + { + } + + protected override IEnumerable LoadVolumes(FileInfo file) + { + return new GZipVolume(file, ReaderOptions).AsEnumerable(); + } + + public static bool IsGZipFile(string filePath) + { + return IsGZipFile(new FileInfo(filePath)); + } + + public static bool IsGZipFile(FileInfo fileInfo) + { + if (!fileInfo.Exists) + { + return false; + } + using (Stream stream = fileInfo.OpenRead()) + { + return IsGZipFile(stream); + } + } + + public void SaveTo(string filePath) + { + SaveTo(new FileInfo(filePath)); + } + + public void SaveTo(FileInfo fileInfo) + { + using (var stream = fileInfo.Open(FileMode.Create, FileAccess.Write)) + { + SaveTo(stream, new WriterOptions(CompressionType.GZip)); + } + } +#endif + + public static bool IsGZipFile(Stream stream) + { + // read the header on the first read + byte[] header = new byte[10]; + + // workitem 8501: handle edge case (decompress empty stream) + if (!stream.ReadFully(header)) + { + return false; + } + + if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8) + { + return false; + } + + return true; + } + + /// + /// Takes multiple seekable Streams for a multi-part archive + /// + /// + /// + internal GZipArchive(Stream stream, ReaderOptions options) + : base(ArchiveType.GZip, stream, options) + { + } + + internal GZipArchive() + : base(ArchiveType.GZip) + { + } + + protected override GZipArchiveEntry CreateEntryInternal(string filePath, Stream source, long size, DateTime? 
modified, + bool closeStream) + { + if (Entries.Any()) + { + throw new InvalidOperationException("Only one entry is allowed in a GZip Archive"); + } + return new GZipWritableArchiveEntry(this, source, filePath, size, modified, closeStream); + } + + protected override void SaveTo(Stream stream, WriterOptions options, + IEnumerable oldEntries, + IEnumerable newEntries) + { + if (Entries.Count > 1) + { + throw new InvalidOperationException("Only one entry is allowed in a GZip Archive"); + } + using (var writer = new GZipWriter(stream, new GZipWriterOptions(options))) + { + foreach (var entry in oldEntries.Concat(newEntries) + .Where(x => !x.IsDirectory)) + { + using (var entryStream = entry.OpenEntryStream()) + { + writer.Write(entry.Key, entryStream, entry.LastModifiedTime); + } + } + } + } + + protected override IEnumerable LoadVolumes(IEnumerable streams) + { + return new GZipVolume(streams.First(), ReaderOptions).AsEnumerable(); + } + + protected override IEnumerable LoadEntries(IEnumerable volumes) + { + Stream stream = volumes.Single().Stream; + yield return new GZipArchiveEntry(this, new GZipFilePart(stream, ReaderOptions.ArchiveEncoding)); + } + + protected override IReader CreateReaderForSolidExtraction() + { + var stream = Volumes.Single().Stream; + stream.Position = 0; + return GZipReader.Open(stream); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchiveEntry.cs new file mode 100644 index 0000000000..7f417171e4 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipArchiveEntry.cs @@ -0,0 +1,34 @@ +using System.IO; +using System.Linq; +using SharpCompress.Common.GZip; + +namespace SharpCompress.Archives.GZip +{ + public class GZipArchiveEntry : GZipEntry, IArchiveEntry + { + internal GZipArchiveEntry(GZipArchive archive, GZipFilePart part) + : base(part) + { + Archive = archive; + } + + public virtual Stream 
OpenEntryStream() + { + //this is to reset the stream to be read multiple times + var part = Parts.Single() as GZipFilePart; + if (part.GetRawStream().Position != part.EntryStartPosition) + { + part.GetRawStream().Position = part.EntryStartPosition; + } + return Parts.Single().GetCompressedStream(); + } + + #region IArchiveEntry Members + + public IArchive Archive { get; } + + public bool IsComplete => true; + + #endregion + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipWritableArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipWritableArchiveEntry.cs new file mode 100644 index 0000000000..8bf96f3928 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/GZip/GZipWritableArchiveEntry.cs @@ -0,0 +1,66 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common; +using SharpCompress.IO; + +namespace SharpCompress.Archives.GZip +{ + internal class GZipWritableArchiveEntry : GZipArchiveEntry, IWritableArchiveEntry + { + private readonly bool closeStream; + private readonly Stream stream; + + internal GZipWritableArchiveEntry(GZipArchive archive, Stream stream, + string path, long size, DateTime? lastModified, bool closeStream) + : base(archive, null) + { + this.stream = stream; + Key = path; + Size = size; + LastModifiedTime = lastModified; + this.closeStream = closeStream; + } + + public override long Crc => 0; + + public override string Key { get; } + + public override long CompressedSize => 0; + + public override long Size { get; } + + public override DateTime? LastModifiedTime { get; } + + public override DateTime? CreatedTime => null; + + public override DateTime? LastAccessedTime => null; + + public override DateTime? 
ArchivedTime => null; + + public override bool IsEncrypted => false; + + public override bool IsDirectory => false; + + public override bool IsSplitAfter => false; + + internal override IEnumerable Parts => throw new NotImplementedException(); + + Stream IWritableArchiveEntry.Stream => stream; + + public override Stream OpenEntryStream() + { + //ensure new stream is at the start, this could be reset + stream.Seek(0, SeekOrigin.Begin); + return new NonDisposingStream(stream); + } + + internal override void Close() + { + if (closeStream) + { + stream.Dispose(); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/IArchive.cs new file mode 100644 index 0000000000..2ba84a399b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IArchive.cs @@ -0,0 +1,49 @@ +using System; +using System.Collections.Generic; +using SharpCompress.Common; +using SharpCompress.Readers; + +namespace SharpCompress.Archives +{ + public interface IArchive : IDisposable + { + event EventHandler> EntryExtractionBegin; + event EventHandler> EntryExtractionEnd; + + event EventHandler CompressedBytesRead; + event EventHandler FilePartExtractionBegin; + + IEnumerable Entries { get; } + IEnumerable Volumes { get; } + + ArchiveType Type { get; } + + /// + /// Use this method to extract all entries in an archive in order. + /// This is primarily for SOLID Rar Archives or 7Zip Archives as they need to be + /// extracted sequentially for the best performance. + /// + IReader ExtractAllEntries(); + + /// + /// Archive is SOLID (this means the Archive saved bytes by reusing information which helps for archives containing many small files). + /// Rar Archives can be SOLID while all 7Zip archives are considered SOLID. 
+ /// + bool IsSolid { get; } + + /// + /// This checks to see if all the known entries have IsComplete = true + /// + bool IsComplete { get; } + + /// + /// The total size of the files compressed in the archive. + /// + long TotalSize { get; } + + /// + /// The total size of the files as uncompressed in the archive. + /// + long TotalUncompressSize { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntry.cs new file mode 100644 index 0000000000..43e681b454 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntry.cs @@ -0,0 +1,24 @@ +using System.IO; +using SharpCompress.Common; + +namespace SharpCompress.Archives +{ + public interface IArchiveEntry : IEntry + { + /// + /// Opens the current entry as a stream that will decompress as it is read. + /// Read the entire stream or use SkipEntry on EntryStream. + /// + Stream OpenEntryStream(); + + /// + /// The archive can find all the parts of the archive needed to extract this entry. 
+ /// + bool IsComplete { get; } + + /// + /// The archive instance this entry belongs to + /// + IArchive Archive { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntryExtensions.cs b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntryExtensions.cs new file mode 100644 index 0000000000..e1716fb30c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveEntryExtensions.cs @@ -0,0 +1,70 @@ +using System.IO; +using SharpCompress.Common; +using SharpCompress.IO; + +namespace SharpCompress.Archives +{ + public static class IArchiveEntryExtensions + { + public static void WriteTo(this IArchiveEntry archiveEntry, Stream streamToWriteTo) + { + if (archiveEntry.Archive.Type == ArchiveType.Rar && archiveEntry.Archive.IsSolid) + { + throw new InvalidFormatException("Cannot use Archive random access on SOLID Rar files."); + } + + if (archiveEntry.IsDirectory) + { + throw new ExtractionException("Entry is a file directory and cannot be extracted."); + } + + var streamListener = archiveEntry.Archive as IArchiveExtractionListener; + streamListener.EnsureEntriesLoaded(); + streamListener.FireEntryExtractionBegin(archiveEntry); + streamListener.FireFilePartExtractionBegin(archiveEntry.Key, archiveEntry.Size, archiveEntry.CompressedSize); + var entryStream = archiveEntry.OpenEntryStream(); + if (entryStream == null) + { + return; + } + using (entryStream) + { + using (Stream s = new ListeningStream(streamListener, entryStream)) + { + s.TransferTo(streamToWriteTo); + } + } + streamListener.FireEntryExtractionEnd(archiveEntry); + } + +#if !NO_FILE + +/// +/// Extract to specific directory, retaining filename +/// + public static void WriteToDirectory(this IArchiveEntry entry, string destinationDirectory, + ExtractionOptions options = null) + { + ExtractionMethods.WriteEntryToDirectory(entry, destinationDirectory, options, + entry.WriteToFile); + } + + /// + /// Extract to specific file + /// + public 
static void WriteToFile(this IArchiveEntry entry, string destinationFileName, + ExtractionOptions options = null) + { + + ExtractionMethods.WriteEntryToFile(entry, destinationFileName, options, + (x, fm) => + { + using (FileStream fs = File.Open(destinationFileName, fm)) + { + entry.WriteTo(fs); + } + }); + } +#endif + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtensions.cs b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtensions.cs new file mode 100644 index 0000000000..7b66966631 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtensions.cs @@ -0,0 +1,26 @@ +#if !NO_FILE +using System.Linq; +using SharpCompress.Common; + +#endif + +namespace SharpCompress.Archives +{ + public static class IArchiveExtensions + { +#if !NO_FILE + +/// +/// Extract to specific directory, retaining filename +/// + public static void WriteToDirectory(this IArchive archive, string destinationDirectory, + ExtractionOptions options = null) + { + foreach (IArchiveEntry entry in archive.Entries.Where(x => !x.IsDirectory)) + { + entry.WriteToDirectory(destinationDirectory, options); + } + } +#endif + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtractionListener.cs b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtractionListener.cs new file mode 100644 index 0000000000..9ce07e8ae6 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IArchiveExtractionListener.cs @@ -0,0 +1,11 @@ +using SharpCompress.Common; + +namespace SharpCompress.Archives +{ + internal interface IArchiveExtractionListener : IExtractionListener + { + void EnsureEntriesLoaded(); + void FireEntryExtractionBegin(IArchiveEntry entry); + void FireEntryExtractionEnd(IArchiveEntry entry); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchive.cs 
new file mode 100644 index 0000000000..380d681482 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchive.cs @@ -0,0 +1,15 @@ +using System; +using System.IO; +using SharpCompress.Writers; + +namespace SharpCompress.Archives +{ + public interface IWritableArchive : IArchive + { + void RemoveEntry(IArchiveEntry entry); + + IArchiveEntry AddEntry(string key, Stream source, bool closeStream, long size = 0, DateTime? modified = null); + + void SaveTo(Stream stream, WriterOptions options); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveEntry.cs new file mode 100644 index 0000000000..044eb1c7b7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveEntry.cs @@ -0,0 +1,9 @@ +using System.IO; + +namespace SharpCompress.Archives +{ + internal interface IWritableArchiveEntry + { + Stream Stream { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveExtensions.cs b/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveExtensions.cs new file mode 100644 index 0000000000..bee42a4994 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/IWritableArchiveExtensions.cs @@ -0,0 +1,63 @@ +#if !NO_FILE +using System; +#endif +using System.IO; +using SharpCompress.Writers; + +namespace SharpCompress.Archives +{ + public static class IWritableArchiveExtensions + { +#if !NO_FILE + + public static void AddEntry(this IWritableArchive writableArchive, + string entryPath, string filePath) + { + var fileInfo = new FileInfo(filePath); + if (!fileInfo.Exists) + { + throw new FileNotFoundException("Could not AddEntry: " + filePath); + } + writableArchive.AddEntry(entryPath, new FileInfo(filePath).OpenRead(), true, fileInfo.Length, + fileInfo.LastWriteTime); + } + + public static void SaveTo(this IWritableArchive writableArchive, 
string filePath, WriterOptions options) + { + writableArchive.SaveTo(new FileInfo(filePath), options); + } + + public static void SaveTo(this IWritableArchive writableArchive, FileInfo fileInfo, WriterOptions options) + { + using (var stream = fileInfo.Open(FileMode.Create, FileAccess.Write)) + { + writableArchive.SaveTo(stream, options); + } + } + + public static void AddAllFromDirectory( + this IWritableArchive writableArchive, + string filePath, string searchPattern = "*.*", SearchOption searchOption = SearchOption.AllDirectories) + { +#if NET35 + foreach (var path in Directory.GetFiles(filePath, searchPattern, searchOption)) +#else + foreach (var path in Directory.EnumerateFiles(filePath, searchPattern, searchOption)) +#endif + { + var fileInfo = new FileInfo(path); + writableArchive.AddEntry(path.Substring(filePath.Length), fileInfo.OpenRead(), true, fileInfo.Length, + fileInfo.LastWriteTime); + } + } + public static IArchiveEntry AddEntry(this IWritableArchive writableArchive, string key, FileInfo fileInfo) + { + if (!fileInfo.Exists) + { + throw new ArgumentException("FileInfo does not exist."); + } + return writableArchive.AddEntry(key, fileInfo.OpenRead(), true, fileInfo.Length, fileInfo.LastWriteTime); + } +#endif + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarArchiveVolume.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarArchiveVolume.cs new file mode 100644 index 0000000000..7932caadfe --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarArchiveVolume.cs @@ -0,0 +1,46 @@ + +#if !NO_FILE +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; +using SharpCompress.IO; +using SharpCompress.Readers; + +namespace SharpCompress.Archives.Rar +{ + /// + /// A rar part based on a FileInfo object + /// + internal class FileInfoRarArchiveVolume : RarVolume + { + internal 
FileInfoRarArchiveVolume(FileInfo fileInfo, ReaderOptions options) + : base(StreamingMode.Seekable, fileInfo.OpenRead(), FixOptions(options)) + { + FileInfo = fileInfo; + FileParts = GetVolumeFileParts().ToReadOnly(); + } + + private static ReaderOptions FixOptions(ReaderOptions options) + { + //make sure we're closing streams with fileinfo + options.LeaveStreamOpen = false; + return options; + } + + internal ReadOnlyCollection FileParts { get; } + + internal FileInfo FileInfo { get; } + + internal override RarFilePart CreateFilePart(MarkHeader markHeader, FileHeader fileHeader) + { + return new FileInfoRarFilePart(this, ReaderOptions.Password, markHeader, fileHeader, FileInfo); + } + + internal override IEnumerable ReadFileParts() + { + return FileParts; + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarFilePart.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarFilePart.cs new file mode 100644 index 0000000000..4b31a774aa --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/FileInfoRarFilePart.cs @@ -0,0 +1,28 @@ + +#if !NO_FILE +using System.IO; +using SharpCompress.Common.Rar.Headers; + +namespace SharpCompress.Archives.Rar +{ + internal class FileInfoRarFilePart : SeekableFilePart + { + internal FileInfoRarFilePart(FileInfoRarArchiveVolume volume, string password, MarkHeader mh, FileHeader fh, FileInfo fi) + : base(mh, fh, volume.Stream, password) + { + FileInfo = fi; + } + + internal FileInfo FileInfo { get; } + + internal override string FilePartName + { + get + { + return "Rar File: " + FileInfo.FullName + + " File Entry: " + FileHeader.FileName; + } + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.Extensions.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.Extensions.cs new file mode 100644 index 0000000000..7eefef9c07 --- /dev/null +++ 
b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.Extensions.cs @@ -0,0 +1,23 @@ +using System.Linq; + +namespace SharpCompress.Archives.Rar +{ + public static class RarArchiveExtensions + { + /// + /// RarArchive is the first volume of a multi-part archive. If MultipartVolume is true and IsFirstVolume is false then the first volume file must be missing. + /// + public static bool IsFirstVolume(this RarArchive archive) + { + return archive.Volumes.First().IsFirstVolume; + } + + /// + /// RarArchive is part of a multi-part archive. + /// + public static bool IsMultipartVolume(this RarArchive archive) + { + return archive.Volumes.First().IsMultiVolume; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.cs new file mode 100644 index 0000000000..52ffaa1c82 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchive.cs @@ -0,0 +1,148 @@ +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; +using SharpCompress.Compressors.Rar; +using SharpCompress.Readers; +using SharpCompress.Readers.Rar; + +namespace SharpCompress.Archives.Rar +{ + public class RarArchive : AbstractArchive + { + internal Lazy UnpackV2017 { get; } = new Lazy(() => new SharpCompress.Compressors.Rar.UnpackV2017.Unpack()); + internal Lazy UnpackV1 { get; } = new Lazy(() => new SharpCompress.Compressors.Rar.UnpackV1.Unpack()); + +#if !NO_FILE + + /// + /// Constructor with a FileInfo object to an existing file. 
+ /// + /// + /// + internal RarArchive(FileInfo fileInfo, ReaderOptions options) + : base(ArchiveType.Rar, fileInfo, options) + { + } + + protected override IEnumerable LoadVolumes(FileInfo file) + { + return RarArchiveVolumeFactory.GetParts(file, ReaderOptions); + } +#endif + + /// + /// Takes multiple seekable Streams for a multi-part archive + /// + /// + /// + internal RarArchive(IEnumerable streams, ReaderOptions options) + : base(ArchiveType.Rar, streams, options) + { + } + + protected override IEnumerable LoadEntries(IEnumerable volumes) + { + return RarArchiveEntryFactory.GetEntries(this, volumes); + } + + protected override IEnumerable LoadVolumes(IEnumerable streams) + { + return RarArchiveVolumeFactory.GetParts(streams, ReaderOptions); + } + + protected override IReader CreateReaderForSolidExtraction() + { + var stream = Volumes.First().Stream; + stream.Position = 0; + return RarReader.Open(stream, ReaderOptions); + } + + public override bool IsSolid => Volumes.First().IsSolidArchive; + + #region Creation + +#if !NO_FILE + + /// + /// Constructor with a FileInfo object to an existing file. + /// + /// + /// + public static RarArchive Open(string filePath, ReaderOptions options = null) + { + filePath.CheckNotNullOrEmpty("filePath"); + return new RarArchive(new FileInfo(filePath), options ?? new ReaderOptions()); + } + + /// + /// Constructor with a FileInfo object to an existing file. + /// + /// + /// + public static RarArchive Open(FileInfo fileInfo, ReaderOptions options = null) + { + fileInfo.CheckNotNull("fileInfo"); + return new RarArchive(fileInfo, options ?? new ReaderOptions()); + } +#endif + + /// + /// Takes a seekable Stream as a source + /// + /// + /// + public static RarArchive Open(Stream stream, ReaderOptions options = null) + { + stream.CheckNotNull("stream"); + return Open(stream.AsEnumerable(), options ?? 
new ReaderOptions()); + } + + /// + /// Takes multiple seekable Streams for a multi-part archive + /// + /// + /// + public static RarArchive Open(IEnumerable streams, ReaderOptions options = null) + { + streams.CheckNotNull("streams"); + return new RarArchive(streams, options ?? new ReaderOptions()); + } + +#if !NO_FILE + public static bool IsRarFile(string filePath) + { + return IsRarFile(new FileInfo(filePath)); + } + + public static bool IsRarFile(FileInfo fileInfo) + { + if (!fileInfo.Exists) + { + return false; + } + using (Stream stream = fileInfo.OpenRead()) + { + return IsRarFile(stream); + } + } +#endif + + public static bool IsRarFile(Stream stream, ReaderOptions options = null) + { + try + { + MarkHeader.Read(stream, true, false); + return true; + } + catch + { + return false; + } + } + + #endregion + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntry.cs new file mode 100644 index 0000000000..3f2360b2b8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntry.cs @@ -0,0 +1,89 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; +using SharpCompress.Compressors.Rar; + +namespace SharpCompress.Archives.Rar +{ + public class RarArchiveEntry : RarEntry, IArchiveEntry + { + private readonly ICollection parts; + private readonly RarArchive archive; + + internal RarArchiveEntry(RarArchive archive, IEnumerable parts) + { + this.parts = parts.ToList(); + this.archive = archive; + } + + public override CompressionType CompressionType => CompressionType.Rar; + + public IArchive Archive => archive; + + internal override IEnumerable Parts => parts.Cast(); + + internal override FileHeader FileHeader => parts.First().FileHeader; + + public override long Crc + { + get + { + 
CheckIncomplete(); + return parts.Select(fp => fp.FileHeader).Single(fh => !fh.IsSplitAfter).FileCrc; + } + } + + public override long Size + { + get + { + CheckIncomplete(); + return parts.First().FileHeader.UncompressedSize; + } + } + + public override long CompressedSize + { + get + { + CheckIncomplete(); + return parts.Aggregate(0L, (total, fp) => total + fp.FileHeader.CompressedSize); + } + } + + public Stream OpenEntryStream() + { + if (archive.IsSolid) + { + throw new InvalidOperationException("Use ExtractAllEntries to extract SOLID archives."); + } + + if (IsRarV3) + { + return new RarStream(archive.UnpackV1.Value, FileHeader, new MultiVolumeReadOnlyStream(Parts.Cast(), archive)); + } + + return new RarStream(archive.UnpackV2017.Value, FileHeader, new MultiVolumeReadOnlyStream(Parts.Cast(), archive)); + } + + public bool IsComplete + { + get + { + return parts.Select(fp => fp.FileHeader).Any(fh => !fh.IsSplitAfter); + } + } + + private void CheckIncomplete() + { + if (!IsComplete) + { + throw new IncompleteArchiveException("ArchiveEntry is incomplete and cannot perform this operation."); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntryFactory.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntryFactory.cs new file mode 100644 index 0000000000..e41c024dcd --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveEntryFactory.cs @@ -0,0 +1,47 @@ +using System.Collections.Generic; +using SharpCompress.Common.Rar; + +namespace SharpCompress.Archives.Rar +{ + internal static class RarArchiveEntryFactory + { + private static IEnumerable GetFileParts(IEnumerable parts) + { + foreach (RarVolume rarPart in parts) + { + foreach (RarFilePart fp in rarPart.ReadFileParts()) + { + yield return fp; + } + } + } + + private static IEnumerable> GetMatchedFileParts(IEnumerable parts) + { + var groupedParts = new List(); + foreach (RarFilePart fp in GetFileParts(parts)) 
+ { + groupedParts.Add(fp); + + if (!fp.FileHeader.IsSplitAfter) + { + yield return groupedParts; + groupedParts = new List(); + } + } + if (groupedParts.Count > 0) + { + yield return groupedParts; + } + } + + internal static IEnumerable GetEntries(RarArchive archive, + IEnumerable rarParts) + { + foreach (var groupedParts in GetMatchedFileParts(rarParts)) + { + yield return new RarArchiveEntry(archive, groupedParts); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveVolumeFactory.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveVolumeFactory.cs new file mode 100644 index 0000000000..57eb3c694f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/RarArchiveVolumeFactory.cs @@ -0,0 +1,147 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common.Rar; +using SharpCompress.Readers; +#if !NO_FILE +using System.Linq; +using System.Text; +using SharpCompress.Common.Rar.Headers; +#endif + +namespace SharpCompress.Archives.Rar +{ + internal static class RarArchiveVolumeFactory + { + internal static IEnumerable GetParts(IEnumerable streams, ReaderOptions options) + { + foreach (Stream s in streams) + { + if (!s.CanRead || !s.CanSeek) + { + throw new ArgumentException("Stream is not readable and seekable"); + } + StreamRarArchiveVolume part = new StreamRarArchiveVolume(s, options); + yield return part; + } + } + +#if !NO_FILE + internal static IEnumerable GetParts(FileInfo fileInfo, ReaderOptions options) + { + FileInfoRarArchiveVolume part = new FileInfoRarArchiveVolume(fileInfo, options); + yield return part; + + ArchiveHeader ah = part.ArchiveHeader; + if (!ah.IsVolume) + { + yield break; //if file isn't volume then there is no reason to look + } + fileInfo = GetNextFileInfo(ah, part.FileParts.FirstOrDefault() as FileInfoRarFilePart); + //we use fileinfo because rar is dumb and looks at file names rather than archive info for another 
volume + while (fileInfo != null && fileInfo.Exists) + { + part = new FileInfoRarArchiveVolume(fileInfo, options); + + fileInfo = GetNextFileInfo(ah, part.FileParts.FirstOrDefault() as FileInfoRarFilePart); + yield return part; + } + } + + private static FileInfo GetNextFileInfo(ArchiveHeader ah, FileInfoRarFilePart currentFilePart) + { + if (currentFilePart == null) + { + return null; + } + bool oldNumbering = ah.OldNumberingFormat + || currentFilePart.MarkHeader.OldNumberingFormat; + if (oldNumbering) + { + return FindNextFileWithOldNumbering(currentFilePart.FileInfo); + } + else + { + return FindNextFileWithNewNumbering(currentFilePart.FileInfo); + } + } + + private static FileInfo FindNextFileWithOldNumbering(FileInfo currentFileInfo) + { + // .rar, .r00, .r01, ... + string extension = currentFileInfo.Extension; + + StringBuilder buffer = new StringBuilder(currentFileInfo.FullName.Length); + buffer.Append(currentFileInfo.FullName.Substring(0, + currentFileInfo.FullName.Length - extension.Length)); + if (string.Compare(extension, ".rar", StringComparison.OrdinalIgnoreCase) == 0) + { + buffer.Append(".r00"); + } + else + { + int num = 0; + if (int.TryParse(extension.Substring(2, 2), out num)) + { + num++; + buffer.Append(".r"); + if (num < 10) + { + buffer.Append('0'); + } + buffer.Append(num); + } + else + { + ThrowInvalidFileName(currentFileInfo); + } + } + return new FileInfo(buffer.ToString()); + } + + private static FileInfo FindNextFileWithNewNumbering(FileInfo currentFileInfo) + { + // part1.rar, part2.rar, ... 
+ string extension = currentFileInfo.Extension; + if (string.Compare(extension, ".rar", StringComparison.OrdinalIgnoreCase) != 0) + { + throw new ArgumentException("Invalid extension, expected 'rar': " + currentFileInfo.FullName); + } + int startIndex = currentFileInfo.FullName.LastIndexOf(".part"); + if (startIndex < 0) + { + ThrowInvalidFileName(currentFileInfo); + } + StringBuilder buffer = new StringBuilder(currentFileInfo.FullName.Length); + buffer.Append(currentFileInfo.FullName, 0, startIndex); + int num = 0; + string numString = currentFileInfo.FullName.Substring(startIndex + 5, + currentFileInfo.FullName.IndexOf('.', startIndex + 5) - + startIndex - 5); + buffer.Append(".part"); + if (int.TryParse(numString, out num)) + { + num++; + for (int i = 0; i < numString.Length - num.ToString().Length; i++) + { + buffer.Append('0'); + } + buffer.Append(num); + } + else + { + ThrowInvalidFileName(currentFileInfo); + } + buffer.Append(".rar"); + return new FileInfo(buffer.ToString()); + } + + private static void ThrowInvalidFileName(FileInfo fileInfo) + { + throw new ArgumentException("Filename invalid or next archive could not be found:" + + fileInfo.FullName); + } + +#endif + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/SeekableFilePart.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/SeekableFilePart.cs new file mode 100644 index 0000000000..b7d3affcb9 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/SeekableFilePart.cs @@ -0,0 +1,33 @@ +using System.IO; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; + +namespace SharpCompress.Archives.Rar +{ + internal class SeekableFilePart : RarFilePart + { + private readonly Stream stream; + private readonly string password; + + internal SeekableFilePart(MarkHeader mh, FileHeader fh, Stream stream, string password) + : base(mh, fh) + { + this.stream = stream; + this.password = password; + } + + internal override Stream 
GetCompressedStream() + { + stream.Position = FileHeader.DataStartPosition; +#if !NO_CRYPTO + if (FileHeader.R4Salt != null) + { + return new RarCryptoWrapper(stream, password, FileHeader.R4Salt); + } +#endif + return stream; + } + + internal override string FilePartName => "Unknown Stream - File Entry: " + FileHeader.FileName; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Rar/StreamRarArchiveVolume.cs b/BizHawk.Client.Common/SharpCompress/Archives/Rar/StreamRarArchiveVolume.cs new file mode 100644 index 0000000000..92602ae9a5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Rar/StreamRarArchiveVolume.cs @@ -0,0 +1,27 @@ +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; +using SharpCompress.IO; +using SharpCompress.Readers; + +namespace SharpCompress.Archives.Rar +{ + internal class StreamRarArchiveVolume : RarVolume + { + internal StreamRarArchiveVolume(Stream stream, ReaderOptions options) + : base(StreamingMode.Seekable, stream, options) + { + } + + internal override IEnumerable ReadFileParts() + { + return GetVolumeFileParts(); + } + + internal override RarFilePart CreateFilePart(MarkHeader markHeader, FileHeader fileHeader) + { + return new SeekableFilePart(markHeader, fileHeader, Stream, ReaderOptions.Password); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchive.cs new file mode 100644 index 0000000000..9cf6414281 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchive.cs @@ -0,0 +1,226 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.SevenZip; +using SharpCompress.Compressors.LZMA.Utilites; +using SharpCompress.IO; +using SharpCompress.Readers; + 
+namespace SharpCompress.Archives.SevenZip +{ + public class SevenZipArchive : AbstractArchive + { + private ArchiveDatabase database; +#if !NO_FILE + + /// + /// Constructor expects a filepath to an existing file. + /// + /// + /// + public static SevenZipArchive Open(string filePath, ReaderOptions readerOptions = null) + { + filePath.CheckNotNullOrEmpty("filePath"); + return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions()); + } + + /// + /// Constructor with a FileInfo object to an existing file. + /// + /// + /// + public static SevenZipArchive Open(FileInfo fileInfo, ReaderOptions readerOptions = null) + { + fileInfo.CheckNotNull("fileInfo"); + return new SevenZipArchive(fileInfo, readerOptions ?? new ReaderOptions()); + } +#endif + /// + /// Takes a seekable Stream as a source + /// + /// + /// + public static SevenZipArchive Open(Stream stream, ReaderOptions readerOptions = null) + { + stream.CheckNotNull("stream"); + return new SevenZipArchive(stream, readerOptions ?? 
new ReaderOptions()); + } + +#if !NO_FILE + internal SevenZipArchive(FileInfo fileInfo, ReaderOptions readerOptions) + : base(ArchiveType.SevenZip, fileInfo, readerOptions) + { + } + + protected override IEnumerable LoadVolumes(FileInfo file) + { + return new SevenZipVolume(file.OpenRead(), ReaderOptions).AsEnumerable(); + } + + public static bool IsSevenZipFile(string filePath) + { + return IsSevenZipFile(new FileInfo(filePath)); + } + + public static bool IsSevenZipFile(FileInfo fileInfo) + { + if (!fileInfo.Exists) + { + return false; + } + using (Stream stream = fileInfo.OpenRead()) + { + return IsSevenZipFile(stream); + } + } +#endif + + internal SevenZipArchive(Stream stream, ReaderOptions readerOptions) + : base(ArchiveType.SevenZip, stream.AsEnumerable(), readerOptions) + { + } + + internal SevenZipArchive() + : base(ArchiveType.SevenZip) + { + } + + protected override IEnumerable LoadVolumes(IEnumerable streams) + { + foreach (Stream s in streams) + { + if (!s.CanRead || !s.CanSeek) + { + throw new ArgumentException("Stream is not readable and seekable"); + } + SevenZipVolume volume = new SevenZipVolume(s, ReaderOptions); + yield return volume; + } + } + + protected override IEnumerable LoadEntries(IEnumerable volumes) + { + var stream = volumes.Single().Stream; + LoadFactory(stream); + for (int i = 0; i < database._files.Count; i++) + { + var file = database._files[i]; + yield return new SevenZipArchiveEntry(this, new SevenZipFilePart(stream, database, i, file, ReaderOptions.ArchiveEncoding)); + } + } + + private void LoadFactory(Stream stream) + { + if (database == null) + { + stream.Position = 0; + var reader = new ArchiveReader(); + reader.Open(stream); + database = reader.ReadDatabase(new PasswordProvider(ReaderOptions.Password)); + } + } + + public static bool IsSevenZipFile(Stream stream) + { + try + { + return SignatureMatch(stream); + } + catch + { + return false; + } + } + + private static readonly byte[] SIGNATURE = {(byte)'7', (byte)'z', 0xBC, 
0xAF, 0x27, 0x1C}; + + private static bool SignatureMatch(Stream stream) + { + BinaryReader reader = new BinaryReader(stream); + byte[] signatureBytes = reader.ReadBytes(6); + return signatureBytes.BinaryEquals(SIGNATURE); + } + + protected override IReader CreateReaderForSolidExtraction() + { + return new SevenZipReader(ReaderOptions, this); + } + + public override bool IsSolid { get { return Entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder).Count() > 1; } } + + public override long TotalSize + { + get + { + int i = Entries.Count; + return database._packSizes.Aggregate(0L, (total, packSize) => total + packSize); + } + } + + private class SevenZipReader : AbstractReader + { + private readonly SevenZipArchive archive; + private CFolder currentFolder; + private Stream currentStream; + private CFileItem currentItem; + + internal SevenZipReader(ReaderOptions readerOptions, SevenZipArchive archive) + : base(readerOptions, ArchiveType.SevenZip) + { + this.archive = archive; + } + + public override SevenZipVolume Volume => archive.Volumes.Single(); + + protected override IEnumerable GetEntries(Stream stream) + { + List entries = archive.Entries.ToList(); + stream.Position = 0; + foreach (var dir in entries.Where(x => x.IsDirectory)) + { + yield return dir; + } + foreach (var group in entries.Where(x => !x.IsDirectory).GroupBy(x => x.FilePart.Folder)) + { + currentFolder = group.Key; + if (group.Key == null) + { + currentStream = Stream.Null; + } + else + { + currentStream = archive.database.GetFolderStream(stream, currentFolder, new PasswordProvider(Options.Password)); + } + foreach (var entry in group) + { + currentItem = entry.FilePart.Header; + yield return entry; + } + } + } + + protected override EntryStream GetEntryStream() + { + return CreateEntryStream(new ReadOnlySubStream(currentStream, currentItem.Size)); + } + } + + private class PasswordProvider : IPasswordProvider + { + private readonly string _password; + + public PasswordProvider(string 
password) + { + _password = password; + + } + + public string CryptoGetTextPassword() + { + return _password; + } + } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchiveEntry.cs new file mode 100644 index 0000000000..ea80b5cbfd --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/SevenZip/SevenZipArchiveEntry.cs @@ -0,0 +1,28 @@ +using System.IO; +using SharpCompress.Common.SevenZip; + +namespace SharpCompress.Archives.SevenZip +{ + public class SevenZipArchiveEntry : SevenZipEntry, IArchiveEntry + { + internal SevenZipArchiveEntry(SevenZipArchive archive, SevenZipFilePart part) + : base(part) + { + Archive = archive; + } + + public Stream OpenEntryStream() + { + return FilePart.GetCompressedStream(); + } + + public IArchive Archive { get; } + + public bool IsComplete => true; + + /// + /// This is a 7Zip Anti item + /// + public bool IsAnti => FilePart.Header.IsAnti; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchive.cs new file mode 100644 index 0000000000..2ac7ce446c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchive.cs @@ -0,0 +1,206 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.Tar; +using SharpCompress.Common.Tar.Headers; +using SharpCompress.IO; +using SharpCompress.Readers; +using SharpCompress.Readers.Tar; +using SharpCompress.Writers; +using SharpCompress.Writers.Tar; + +namespace SharpCompress.Archives.Tar +{ + public class TarArchive : AbstractWritableArchive + { +#if !NO_FILE + + /// + /// Constructor expects a filepath to an existing file. 
+ /// + /// + /// + public static TarArchive Open(string filePath, ReaderOptions readerOptions = null) + { + filePath.CheckNotNullOrEmpty("filePath"); + return Open(new FileInfo(filePath), readerOptions ?? new ReaderOptions()); + } + + /// + /// Constructor with a FileInfo object to an existing file. + /// + /// + /// + public static TarArchive Open(FileInfo fileInfo, ReaderOptions readerOptions = null) + { + fileInfo.CheckNotNull("fileInfo"); + return new TarArchive(fileInfo, readerOptions ?? new ReaderOptions()); + } +#endif + + /// + /// Takes a seekable Stream as a source + /// + /// + /// + public static TarArchive Open(Stream stream, ReaderOptions readerOptions = null) + { + stream.CheckNotNull("stream"); + return new TarArchive(stream, readerOptions ?? new ReaderOptions()); + } + +#if !NO_FILE + + public static bool IsTarFile(string filePath) + { + return IsTarFile(new FileInfo(filePath)); + } + + public static bool IsTarFile(FileInfo fileInfo) + { + if (!fileInfo.Exists) + { + return false; + } + using (Stream stream = fileInfo.OpenRead()) + { + return IsTarFile(stream); + } + } +#endif + + public static bool IsTarFile(Stream stream) + { + try + { + TarHeader tarHeader = new TarHeader(new ArchiveEncoding()); + bool readSucceeded = tarHeader.Read(new BinaryReader(stream)); + bool isEmptyArchive = tarHeader.Name.Length == 0 && tarHeader.Size == 0 && Enum.IsDefined(typeof(EntryType), tarHeader.EntryType); + return readSucceeded || isEmptyArchive; + } + catch + { + } + return false; + } + +#if !NO_FILE + + /// + /// Constructor with a FileInfo object to an existing file. 
+ /// + /// + /// + internal TarArchive(FileInfo fileInfo, ReaderOptions readerOptions) + : base(ArchiveType.Tar, fileInfo, readerOptions) + { + } + + protected override IEnumerable LoadVolumes(FileInfo file) + { + return new TarVolume(file.OpenRead(), ReaderOptions).AsEnumerable(); + } +#endif + + /// + /// Takes multiple seekable Streams for a multi-part archive + /// + /// + /// + internal TarArchive(Stream stream, ReaderOptions readerOptions) + : base(ArchiveType.Tar, stream, readerOptions) + { + } + + internal TarArchive() + : base(ArchiveType.Tar) + { + } + + protected override IEnumerable LoadVolumes(IEnumerable streams) + { + return new TarVolume(streams.First(), ReaderOptions).AsEnumerable(); + } + + protected override IEnumerable LoadEntries(IEnumerable volumes) + { + Stream stream = volumes.Single().Stream; + TarHeader previousHeader = null; + foreach (TarHeader header in TarHeaderFactory.ReadHeader(StreamingMode.Seekable, stream, ReaderOptions.ArchiveEncoding)) + { + if (header != null) + { + if (header.EntryType == EntryType.LongName) + { + previousHeader = header; + } + else + { + if (previousHeader != null) + { + var entry = new TarArchiveEntry(this, new TarFilePart(previousHeader, stream), + CompressionType.None); + + var oldStreamPos = stream.Position; + + using (var entryStream = entry.OpenEntryStream()) + { + using (var memoryStream = new MemoryStream()) + { + entryStream.TransferTo(memoryStream); + memoryStream.Position = 0; + var bytes = memoryStream.ToArray(); + + header.Name = ReaderOptions.ArchiveEncoding.Decode(bytes).TrimNulls(); + } + } + + stream.Position = oldStreamPos; + + previousHeader = null; + } + yield return new TarArchiveEntry(this, new TarFilePart(header, stream), CompressionType.None); + } + } + } + } + + public static TarArchive Create() + { + return new TarArchive(); + } + + protected override TarArchiveEntry CreateEntryInternal(string filePath, Stream source, + long size, DateTime? 
modified, bool closeStream) + { + return new TarWritableArchiveEntry(this, source, CompressionType.Unknown, filePath, size, modified, + closeStream); + } + + protected override void SaveTo(Stream stream, WriterOptions options, + IEnumerable oldEntries, + IEnumerable newEntries) + { + using (var writer = new TarWriter(stream, new TarWriterOptions(options))) + { + foreach (var entry in oldEntries.Concat(newEntries) + .Where(x => !x.IsDirectory)) + { + using (var entryStream = entry.OpenEntryStream()) + { + writer.Write(entry.Key, entryStream, entry.LastModifiedTime, entry.Size); + } + } + } + } + + protected override IReader CreateReaderForSolidExtraction() + { + var stream = Volumes.Single().Stream; + stream.Position = 0; + return TarReader.Open(stream); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchiveEntry.cs new file mode 100644 index 0000000000..51a0a49bfb --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarArchiveEntry.cs @@ -0,0 +1,29 @@ +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.Tar; + +namespace SharpCompress.Archives.Tar +{ + public class TarArchiveEntry : TarEntry, IArchiveEntry + { + internal TarArchiveEntry(TarArchive archive, TarFilePart part, CompressionType compressionType) + : base(part, compressionType) + { + Archive = archive; + } + + public virtual Stream OpenEntryStream() + { + return Parts.Single().GetCompressedStream(); + } + + #region IArchiveEntry Members + + public IArchive Archive { get; } + + public bool IsComplete => true; + + #endregion + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarWritableArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarWritableArchiveEntry.cs new file mode 100644 index 0000000000..8e693d523c --- /dev/null +++ 
b/BizHawk.Client.Common/SharpCompress/Archives/Tar/TarWritableArchiveEntry.cs @@ -0,0 +1,65 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common; +using SharpCompress.IO; + +namespace SharpCompress.Archives.Tar +{ + internal class TarWritableArchiveEntry : TarArchiveEntry, IWritableArchiveEntry + { + private readonly bool closeStream; + private readonly Stream stream; + + internal TarWritableArchiveEntry(TarArchive archive, Stream stream, CompressionType compressionType, + string path, long size, DateTime? lastModified, bool closeStream) + : base(archive, null, compressionType) + { + this.stream = stream; + Key = path; + Size = size; + LastModifiedTime = lastModified; + this.closeStream = closeStream; + } + + public override long Crc => 0; + + public override string Key { get; } + + public override long CompressedSize => 0; + + public override long Size { get; } + + public override DateTime? LastModifiedTime { get; } + + public override DateTime? CreatedTime => null; + + public override DateTime? LastAccessedTime => null; + + public override DateTime? 
ArchivedTime => null; + + public override bool IsEncrypted => false; + + public override bool IsDirectory => false; + + public override bool IsSplitAfter => false; + + internal override IEnumerable Parts => throw new NotImplementedException(); + Stream IWritableArchiveEntry.Stream => stream; + + public override Stream OpenEntryStream() + { + //ensure new stream is at the start, this could be reset + stream.Seek(0, SeekOrigin.Begin); + return new NonDisposingStream(stream); + } + + internal override void Close() + { + if (closeStream) + { + stream.Dispose(); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchive.cs b/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchive.cs new file mode 100644 index 0000000000..f0889668c4 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchive.cs @@ -0,0 +1,214 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.Zip; +using SharpCompress.Common.Zip.Headers; +using SharpCompress.Compressors.Deflate; +using SharpCompress.Readers; +using SharpCompress.Readers.Zip; +using SharpCompress.Writers; +using SharpCompress.Writers.Zip; + +namespace SharpCompress.Archives.Zip +{ + public class ZipArchive : AbstractWritableArchive + { + private readonly SeekableZipHeaderFactory headerFactory; + + /// + /// Gets or sets the compression level applied to files added to the archive, + /// if the compression method is set to deflate + /// + public CompressionLevel DeflateCompressionLevel { get; set; } + +#if !NO_FILE + + /// + /// Constructor expects a filepath to an existing file. + /// + /// + /// + public static ZipArchive Open(string filePath, ReaderOptions readerOptions = null) + { + filePath.CheckNotNullOrEmpty("filePath"); + return Open(new FileInfo(filePath), readerOptions ?? 
new ReaderOptions()); + } + + /// + /// Constructor with a FileInfo object to an existing file. + /// + /// + /// + public static ZipArchive Open(FileInfo fileInfo, ReaderOptions readerOptions = null) + { + fileInfo.CheckNotNull("fileInfo"); + return new ZipArchive(fileInfo, readerOptions ?? new ReaderOptions()); + } +#endif + + /// + /// Takes a seekable Stream as a source + /// + /// + /// + public static ZipArchive Open(Stream stream, ReaderOptions readerOptions = null) + { + stream.CheckNotNull("stream"); + return new ZipArchive(stream, readerOptions ?? new ReaderOptions()); + } + +#if !NO_FILE + + public static bool IsZipFile(string filePath, string password = null) + { + return IsZipFile(new FileInfo(filePath), password); + } + + public static bool IsZipFile(FileInfo fileInfo, string password = null) + { + if (!fileInfo.Exists) + { + return false; + } + using (Stream stream = fileInfo.OpenRead()) + { + return IsZipFile(stream, password); + } + } +#endif + + public static bool IsZipFile(Stream stream, string password = null) + { + StreamingZipHeaderFactory headerFactory = new StreamingZipHeaderFactory(password, new ArchiveEncoding()); + try + { + ZipHeader header = + headerFactory.ReadStreamHeader(stream).FirstOrDefault(x => x.ZipHeaderType != ZipHeaderType.Split); + if (header == null) + { + return false; + } + return Enum.IsDefined(typeof(ZipHeaderType), header.ZipHeaderType); + } + catch (CryptographicException) + { + return true; + } + catch + { + return false; + } + } + +#if !NO_FILE + + /// + /// Constructor with a FileInfo object to an existing file. 
+ /// + /// + /// + internal ZipArchive(FileInfo fileInfo, ReaderOptions readerOptions) + : base(ArchiveType.Zip, fileInfo, readerOptions) + { + headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding); + } + + protected override IEnumerable LoadVolumes(FileInfo file) + { + return new ZipVolume(file.OpenRead(), ReaderOptions).AsEnumerable(); + } +#endif + + internal ZipArchive() + : base(ArchiveType.Zip) + { + } + + /// + /// Takes multiple seekable Streams for a multi-part archive + /// + /// + /// + internal ZipArchive(Stream stream, ReaderOptions readerOptions) + : base(ArchiveType.Zip, stream, readerOptions) + { + headerFactory = new SeekableZipHeaderFactory(readerOptions.Password, readerOptions.ArchiveEncoding); + } + + protected override IEnumerable LoadVolumes(IEnumerable streams) + { + return new ZipVolume(streams.First(), ReaderOptions).AsEnumerable(); + } + + protected override IEnumerable LoadEntries(IEnumerable volumes) + { + var volume = volumes.Single(); + Stream stream = volume.Stream; + foreach (ZipHeader h in headerFactory.ReadSeekableHeader(stream)) + { + if (h != null) + { + switch (h.ZipHeaderType) + { + case ZipHeaderType.DirectoryEntry: + { + yield return new ZipArchiveEntry(this, + new SeekableZipFilePart(headerFactory, + h as DirectoryEntryHeader, + stream)); + } + break; + case ZipHeaderType.DirectoryEnd: + { + byte[] bytes = (h as DirectoryEndHeader).Comment; + volume.Comment = ReaderOptions.ArchiveEncoding.Decode(bytes); + yield break; + } + } + } + } + } + + public void SaveTo(Stream stream) + { + SaveTo(stream, new WriterOptions(CompressionType.Deflate)); + } + + protected override void SaveTo(Stream stream, WriterOptions options, + IEnumerable oldEntries, + IEnumerable newEntries) + { + using (var writer = new ZipWriter(stream, new ZipWriterOptions(options))) + { + foreach (var entry in oldEntries.Concat(newEntries) + .Where(x => !x.IsDirectory)) + { + using (var entryStream = 
entry.OpenEntryStream()) + { + writer.Write(entry.Key, entryStream, entry.LastModifiedTime); + } + } + } + } + + protected override ZipArchiveEntry CreateEntryInternal(string filePath, Stream source, long size, DateTime? modified, + bool closeStream) + { + return new ZipWritableArchiveEntry(this, source, filePath, size, modified, closeStream); + } + + public static ZipArchive Create() + { + return new ZipArchive(); + } + + protected override IReader CreateReaderForSolidExtraction() + { + var stream = Volumes.Single().Stream; + stream.Position = 0; + return ZipReader.Open(stream, ReaderOptions); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchiveEntry.cs new file mode 100644 index 0000000000..2f1f80f2cc --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipArchiveEntry.cs @@ -0,0 +1,30 @@ +using System.IO; +using System.Linq; +using SharpCompress.Common.Zip; + +namespace SharpCompress.Archives.Zip +{ + public class ZipArchiveEntry : ZipEntry, IArchiveEntry + { + internal ZipArchiveEntry(ZipArchive archive, SeekableZipFilePart part) + : base(part) + { + Archive = archive; + } + + public virtual Stream OpenEntryStream() + { + return Parts.Single().GetCompressedStream(); + } + + #region IArchiveEntry Members + + public IArchive Archive { get; } + + public bool IsComplete => true; + + #endregion + + public string Comment => (Parts.Single() as SeekableZipFilePart).Comment; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipWritableArchiveEntry.cs b/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipWritableArchiveEntry.cs new file mode 100644 index 0000000000..4cd1fe6140 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Archives/Zip/ZipWritableArchiveEntry.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common; 
+using SharpCompress.IO; + +namespace SharpCompress.Archives.Zip +{ + internal class ZipWritableArchiveEntry : ZipArchiveEntry, IWritableArchiveEntry + { + private readonly bool closeStream; + private readonly Stream stream; + private bool isDisposed; + + internal ZipWritableArchiveEntry(ZipArchive archive, Stream stream, string path, long size, + DateTime? lastModified, bool closeStream) + : base(archive, null) + { + this.stream = stream; + Key = path; + Size = size; + LastModifiedTime = lastModified; + this.closeStream = closeStream; + } + + public override long Crc => 0; + + public override string Key { get; } + + public override long CompressedSize => 0; + + public override long Size { get; } + + public override DateTime? LastModifiedTime { get; } + + public override DateTime? CreatedTime => null; + + public override DateTime? LastAccessedTime => null; + + public override DateTime? ArchivedTime => null; + + public override bool IsEncrypted => false; + + public override bool IsDirectory => false; + + public override bool IsSplitAfter => false; + + internal override IEnumerable Parts => throw new NotImplementedException(); + + Stream IWritableArchiveEntry.Stream => stream; + + public override Stream OpenEntryStream() + { + //ensure new stream is at the start, this could be reset + stream.Seek(0, SeekOrigin.Begin); + return new NonDisposingStream(stream); + } + + internal override void Close() + { + if (closeStream && !isDisposed) + { + stream.Dispose(); + isDisposed = true; + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Buffers/ArrayPool.cs b/BizHawk.Client.Common/SharpCompress/Buffers/ArrayPool.cs new file mode 100644 index 0000000000..d81ed4c07b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Buffers/ArrayPool.cs @@ -0,0 +1,119 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+// See the LICENSE file in the project root for more information. +#if NETCORE +using System.Runtime.CompilerServices; +using System.Threading; + +namespace SharpCompress.Buffers +{ + /// + /// Provides a resource pool that enables reusing instances of type . + /// + /// + /// + /// Renting and returning buffers with an can increase performance + /// in situations where arrays are created and destroyed frequently, resulting in significant + /// memory pressure on the garbage collector. + /// + /// + /// This class is thread-safe. All members may be used by multiple threads concurrently. + /// + /// + internal abstract class ArrayPool + { + /// The lazily-initialized shared pool instance. + private static ArrayPool s_sharedInstance = null; + + /// + /// Retrieves a shared instance. + /// + /// + /// The shared pool provides a default implementation of + /// that's intended for general applicability. It maintains arrays of multiple sizes, and + /// may hand back a larger array than was actually requested, but will never hand back a smaller + /// array than was requested. Renting a buffer from it with will result in an + /// existing buffer being taken from the pool if an appropriate buffer is available or in a new + /// buffer being allocated if one is not available. + /// + public static ArrayPool Shared + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get { return Volatile.Read(ref s_sharedInstance) ?? EnsureSharedCreated(); } + } + + /// Ensures that has been initialized to a pool and returns it. + [MethodImpl(MethodImplOptions.NoInlining)] + private static ArrayPool EnsureSharedCreated() + { + Interlocked.CompareExchange(ref s_sharedInstance, Create(), null); + return s_sharedInstance; + } + + /// + /// Creates a new instance using default configuration options. + /// + /// A new instance. + public static ArrayPool Create() + { + return new DefaultArrayPool(); + } + + /// + /// Creates a new instance using custom configuration options. 
+ /// + /// The maximum length of array instances that may be stored in the pool. + /// + /// The maximum number of array instances that may be stored in each bucket in the pool. The pool + /// groups arrays of similar lengths into buckets for faster access. + /// + /// A new instance with the specified configuration options. + /// + /// The created pool will group arrays into buckets, with no more than + /// in each bucket and with those arrays not exceeding in length. + /// + public static ArrayPool Create(int maxArrayLength, int maxArraysPerBucket) + { + return new DefaultArrayPool(maxArrayLength, maxArraysPerBucket); + } + + /// + /// Retrieves a buffer that is at least the requested length. + /// + /// The minimum length of the array needed. + /// + /// An that is at least in length. + /// + /// + /// This buffer is loaned to the caller and should be returned to the same pool via + /// so that it may be reused in subsequent usage of . + /// It is not a fatal error to not return a rented buffer, but failure to do so may lead to + /// decreased application performance, as the pool may need to create a new buffer to replace + /// the one lost. + /// + public abstract T[] Rent(int minimumLength); + + /// + /// Returns to the pool an array that was previously obtained via on the same + /// instance. + /// + /// + /// The buffer previously obtained from to return to the pool. + /// + /// + /// If true and if the pool will store the buffer to enable subsequent reuse, + /// will clear of its contents so that a subsequent consumer via + /// will not see the previous consumer's content. If false or if the pool will release the buffer, + /// the array's contents are left unchanged. + /// + /// + /// Once a buffer has been returned to the pool, the caller gives up all ownership of the buffer + /// and must not use it. The reference returned from a given call to must only be + /// returned via once. 
The default + /// may hold onto the returned buffer in order to rent it again, or it may release the returned buffer + /// if it's determined that the pool already has enough buffers stored. + /// + public abstract void Return(T[] array, bool clearArray = false); + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPool.cs b/BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPool.cs new file mode 100644 index 0000000000..43cd2c37e8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPool.cs @@ -0,0 +1,144 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. +#if NETCORE +using System; + +namespace SharpCompress.Buffers +{ + internal sealed partial class DefaultArrayPool : ArrayPool + { + /// The default maximum length of each array in the pool (2^20). + private const int DefaultMaxArrayLength = 1024 * 1024; + /// The default maximum number of arrays per bucket that are available for rent. + private const int DefaultMaxNumberOfArraysPerBucket = 50; + /// Lazily-allocated empty array used when arrays of length 0 are requested. + private static T[] s_emptyArray; // we support contracts earlier than those with Array.Empty() + + private readonly Bucket[] _buckets; + + internal DefaultArrayPool() : this(DefaultMaxArrayLength, DefaultMaxNumberOfArraysPerBucket) + { + } + + internal DefaultArrayPool(int maxArrayLength, int maxArraysPerBucket) + { + if (maxArrayLength <= 0) + { + throw new ArgumentOutOfRangeException(nameof(maxArrayLength)); + } + if (maxArraysPerBucket <= 0) + { + throw new ArgumentOutOfRangeException(nameof(maxArraysPerBucket)); + } + + // Our bucketing algorithm has a min length of 2^4 and a max length of 2^30. + // Constrain the actual max used to those values. 
+ const int MinimumArrayLength = 0x10, MaximumArrayLength = 0x40000000; + if (maxArrayLength > MaximumArrayLength) + { + maxArrayLength = MaximumArrayLength; + } + else if (maxArrayLength < MinimumArrayLength) + { + maxArrayLength = MinimumArrayLength; + } + + // Create the buckets. + int poolId = Id; + int maxBuckets = Utilities.SelectBucketIndex(maxArrayLength); + var buckets = new Bucket[maxBuckets + 1]; + for (int i = 0; i < buckets.Length; i++) + { + buckets[i] = new Bucket(Utilities.GetMaxSizeForBucket(i), maxArraysPerBucket, poolId); + } + _buckets = buckets; + } + + /// Gets an ID for the pool to use with events. + private int Id => GetHashCode(); + + public override T[] Rent(int minimumLength) + { + // Arrays can't be smaller than zero. We allow requesting zero-length arrays (even though + // pooling such an array isn't valuable) as it's a valid length array, and we want the pool + // to be usable in general instead of using `new`, even for computed lengths. + if (minimumLength < 0) + { + throw new ArgumentOutOfRangeException(nameof(minimumLength)); + } + else if (minimumLength == 0) + { + // No need for events with the empty array. Our pool is effectively infinite + // and we'll never allocate for rents and never store for returns. + return s_emptyArray ?? (s_emptyArray = new T[0]); + } + + T[] buffer = null; + + int index = Utilities.SelectBucketIndex(minimumLength); + if (index < _buckets.Length) + { + // Search for an array starting at the 'index' bucket. If the bucket is empty, bump up to the + // next higher bucket and try that one, but only try at most a few buckets. + const int MaxBucketsToTry = 2; + int i = index; + do + { + // Attempt to rent from the bucket. If we get a buffer from it, return it. + buffer = _buckets[i].Rent(); + if (buffer != null) + { + return buffer; + } + } + while (++i < _buckets.Length && i != index + MaxBucketsToTry); + + // The pool was exhausted for this buffer size. 
Allocate a new buffer with a size corresponding + // to the appropriate bucket. + buffer = new T[_buckets[index]._bufferLength]; + } + else + { + // The request was for a size too large for the pool. Allocate an array of exactly the requested length. + // When it's returned to the pool, we'll simply throw it away. + buffer = new T[minimumLength]; + } + + return buffer; + } + + public override void Return(T[] array, bool clearArray = false) + { + if (array == null) + { + throw new ArgumentNullException(nameof(array)); + } + else if (array.Length == 0) + { + // Ignore empty arrays. When a zero-length array is rented, we return a singleton + // rather than actually taking a buffer out of the lowest bucket. + return; + } + + // Determine with what bucket this array length is associated + int bucket = Utilities.SelectBucketIndex(array.Length); + + // If we can tell that the buffer was allocated, drop it. Otherwise, check if we have space in the pool + if (bucket < _buckets.Length) + { + // Clear the array if the user requests + if (clearArray) + { + Array.Clear(array, 0, array.Length); + } + + // Return the buffer to its bucket. In the future, we might consider having Return return false + // instead of dropping a bucket, in which case we could try to return to a lower-sized bucket, + // just as how in Rent we allow renting from a higher-sized bucket. + _buckets[bucket].Return(array); + } + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPoolBucket.cs b/BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPoolBucket.cs new file mode 100644 index 0000000000..3012488911 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Buffers/DefaultArrayPoolBucket.cs @@ -0,0 +1,111 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +#if NETCORE +using System; +using System.Diagnostics; +using System.Threading; + +namespace SharpCompress.Buffers +{ + internal sealed partial class DefaultArrayPool : ArrayPool + { + /// Provides a thread-safe bucket containing buffers that can be Rent'd and Return'd. + private sealed class Bucket + { + internal readonly int _bufferLength; + private readonly T[][] _buffers; + private readonly int _poolId; + + private SpinLock _lock; // do not make this readonly; it's a mutable struct + private int _index; + + /// + /// Creates the pool with numberOfBuffers arrays where each buffer is of bufferLength length. + /// + internal Bucket(int bufferLength, int numberOfBuffers, int poolId) + { + _lock = new SpinLock(Debugger.IsAttached); // only enable thread tracking if debugger is attached; it adds non-trivial overheads to Enter/Exit + _buffers = new T[numberOfBuffers][]; + _bufferLength = bufferLength; + _poolId = poolId; + } + + /// Gets an ID for the bucket to use with events. + internal int Id => GetHashCode(); + + /// Takes an array from the bucket. If the bucket is empty, returns null. + internal T[] Rent() + { + T[][] buffers = _buffers; + T[] buffer = null; + + // While holding the lock, grab whatever is at the next available index and + // update the index. We do as little work as possible while holding the spin + // lock to minimize contention with other threads. The try/finally is + // necessary to properly handle thread aborts on platforms which have them. + bool lockTaken = false, allocateBuffer = false; + try + { + _lock.Enter(ref lockTaken); + + if (_index < buffers.Length) + { + buffer = buffers[_index]; + buffers[_index++] = null; + allocateBuffer = buffer == null; + } + } + finally + { + if (lockTaken) _lock.Exit(false); + } + + // While we were holding the lock, we grabbed whatever was at the next available index, if + // there was one. 
If we tried and if we got back null, that means we hadn't yet allocated + // for that slot, in which case we should do so now. + if (allocateBuffer) + { + buffer = new T[_bufferLength]; + } + + return buffer; + } + + /// + /// Attempts to return the buffer to the bucket. If successful, the buffer will be stored + /// in the bucket and true will be returned; otherwise, the buffer won't be stored, and false + /// will be returned. + /// + internal void Return(T[] array) + { + // Check to see if the buffer is the correct size for this bucket + if (array.Length != _bufferLength) + { + throw new ArgumentException("Buffer not from pool", nameof(array)); + } + + // While holding the spin lock, if there's room available in the bucket, + // put the buffer into the next available slot. Otherwise, we just drop it. + // The try/finally is necessary to properly handle thread aborts on platforms + // which have them. + bool lockTaken = false; + try + { + _lock.Enter(ref lockTaken); + + if (_index != 0) + { + _buffers[--_index] = array; + } + } + finally + { + if (lockTaken) _lock.Exit(false); + } + } + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Buffers/Utilities.cs b/BizHawk.Client.Common/SharpCompress/Buffers/Utilities.cs new file mode 100644 index 0000000000..f4100e37d5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Buffers/Utilities.cs @@ -0,0 +1,38 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#if NETCORE
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace SharpCompress.Buffers
{
    /// <summary>
    /// Bucket-index math shared by the array pool: buffers are grouped in power-of-two
    /// buckets starting at 16 bytes (bucket 0 = 16, bucket 1 = 32, ...).
    /// </summary>
    internal static class Utilities
    {
        /// <summary>
        /// Maps a requested buffer size to the smallest bucket whose buffers can hold it.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal static int SelectBucketIndex(int bufferSize)
        {
            Debug.Assert(bufferSize > 0);

            // ceil(log2(bufferSize / 16)) computed with a branching bit-scan:
            // sizes 1..16 map to bucket 0, 17..32 to bucket 1, and so on.
            uint bitsRemaining = ((uint)bufferSize - 1) >> 4;

            int poolIndex = 0;
            if (bitsRemaining > 0xFFFF) { bitsRemaining >>= 16; poolIndex = 16; }
            if (bitsRemaining > 0xFF) { bitsRemaining >>= 8; poolIndex += 8; }
            if (bitsRemaining > 0xF) { bitsRemaining >>= 4; poolIndex += 4; }
            if (bitsRemaining > 0x3) { bitsRemaining >>= 2; poolIndex += 2; }
            if (bitsRemaining > 0x1) { bitsRemaining >>= 1; poolIndex += 1; }

            return poolIndex + (int)bitsRemaining;
        }

        /// <summary>Returns the buffer length used by the given bucket (inverse of SelectBucketIndex).</summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal static int GetMaxSizeForBucket(int binIndex)
        {
            int maxSize = 16 << binIndex;
            Debug.Assert(maxSize >= 0);
            return maxSize;
        }
    }
}
#endif
using System;
using System.Text;

namespace SharpCompress.Common
{
    /// <summary>
    /// Encapsulates the character encodings used for archive metadata
    /// (entry names, comments and passwords).
    /// </summary>
    public class ArchiveEncoding
    {
        /// <summary>
        /// Default encoding to use when archive format doesn't specify one.
        /// </summary>
        public Encoding Default { get; set; }

        /// <summary>
        /// ArchiveEncoding used by encryption schemes which don't comply with RFC 2898.
        /// </summary>
        public Encoding Password { get; set; }

        /// <summary>
        /// Set this encoding when you want to force it for all encoding operations.
        /// </summary>
        public Encoding Forced { get; set; }

        /// <summary>
        /// Set this when you want to use a custom method for all decoding operations.
        /// </summary>
        /// <remarks>string Func(bytes, index, length)</remarks>
        public Func<byte[], int, int, string> CustomDecoder { get; set; }

        public ArchiveEncoding()
        {
            // Code page 437 (original IBM PC) is the historical default of zip tooling.
#if NETSTANDARD1_0
            Default = Encoding.GetEncoding("cp437");
            Password = Encoding.GetEncoding("cp437");
#else
            Default = Encoding.GetEncoding(437);
            Password = Encoding.GetEncoding(437);
#endif
        }

#if NETSTANDARD1_3 || NETSTANDARD2_0
        static ArchiveEncoding()
        {
            // .NET Core does not ship legacy code pages by default; register them once.
            Encoding.RegisterProvider(CodePagesEncodingProvider.Instance);
        }
#endif

        /// <summary>Decodes the whole byte array using the effective decoder.</summary>
        public string Decode(byte[] bytes)
        {
            return Decode(bytes, 0, bytes.Length);
        }

        /// <summary>Decodes a slice of the byte array using the effective decoder.</summary>
        public string Decode(byte[] bytes, int start, int length)
        {
            return GetDecoder().Invoke(bytes, start, length);
        }

        /// <summary>Decodes the bytes as UTF-8 regardless of the configured encodings.</summary>
        public string DecodeUTF8(byte[] bytes)
        {
            return Encoding.UTF8.GetString(bytes, 0, bytes.Length);
        }

        /// <summary>Encodes the string with the effective encoding (Forced, then Default, then UTF-8).</summary>
        public byte[] Encode(string str)
        {
            return GetEncoding().GetBytes(str);
        }

        public Encoding GetEncoding()
        {
            return Forced ?? Default ?? Encoding.UTF8;
        }

        public Func<byte[], int, int, string> GetDecoder()
        {
            // CustomDecoder wins; otherwise decode with the effective encoding.
            return CustomDecoder ?? ((bytes, index, count) => GetEncoding().GetString(bytes, index, count));
        }
    }

    /// <summary>Base exception for archive-level errors.</summary>
    public class ArchiveException : Exception
    {
        public ArchiveException(string message)
            : base(message)
        {
        }
    }

    /// <summary>Event payload carrying the entry that was just extracted.</summary>
    public class ArchiveExtractionEventArgs<T> : EventArgs
    {
        internal ArchiveExtractionEventArgs(T entry)
        {
            Item = entry;
        }

        public T Item { get; }
    }

    /// <summary>Supported archive container formats.</summary>
    public enum ArchiveType
    {
        Rar,
        Zip,
        Tar,
        SevenZip,
        GZip
    }
}
using System;
using System.Collections.Generic;

namespace SharpCompress.Common
{
    public class CompressedBytesReadEventArgs : EventArgs
    {
        /// <summary>
        /// Compressed bytes read for the current entry.
        /// </summary>
        public long CompressedBytesRead { get; internal set; }

        /// <summary>
        /// Current file part read for multipart files (e.g. Rar).
        /// </summary>
        public long CurrentFilePartCompressedBytesRead { get; internal set; }
    }

    /// <summary>Supported compression algorithms.</summary>
    public enum CompressionType
    {
        None,
        GZip,
        BZip2,
        PPMd,
        Deflate,
        Rar,
        LZMA,
        BCJ,
        BCJ2,
        LZip,
        Xz,
        Unknown,
        Deflate64
    }

    public class CryptographicException : Exception
    {
        public CryptographicException(string message)
            : base(message)
        {
        }
    }

    /// <summary>
    /// Base class describing a single entry inside an archive, independent of format.
    /// </summary>
    public abstract class Entry : IEntry
    {
        /// <summary>
        /// The file's 32-bit CRC hash.
        /// </summary>
        public abstract long Crc { get; }

        /// <summary>
        /// The string key of the file internal to the archive.
        /// </summary>
        public abstract string Key { get; }

        /// <summary>
        /// The target of a symlink entry internal to the archive. Will be null if not a symlink.
        /// </summary>
        public abstract string LinkTarget { get; }

        /// <summary>
        /// The compressed file size.
        /// </summary>
        public abstract long CompressedSize { get; }

        /// <summary>
        /// The compression type.
        /// </summary>
        public abstract CompressionType CompressionType { get; }

        /// <summary>
        /// The uncompressed file size.
        /// </summary>
        public abstract long Size { get; }

        /// <summary>
        /// The entry last modified time in the archive, if recorded.
        /// </summary>
        public abstract DateTime? LastModifiedTime { get; }

        /// <summary>
        /// The entry create time in the archive, if recorded.
        /// </summary>
        public abstract DateTime? CreatedTime { get; }

        /// <summary>
        /// The entry last accessed time in the archive, if recorded.
        /// </summary>
        public abstract DateTime? LastAccessedTime { get; }

        /// <summary>
        /// The entry time when archived, if recorded.
        /// </summary>
        public abstract DateTime? ArchivedTime { get; }

        /// <summary>
        /// Entry is password protected and encrypted and cannot be extracted.
        /// </summary>
        public abstract bool IsEncrypted { get; }

        /// <summary>
        /// Entry is a directory.
        /// </summary>
        public abstract bool IsDirectory { get; }

        /// <summary>
        /// Entry is split among multiple volumes.
        /// </summary>
        public abstract bool IsSplitAfter { get; }

        /// <inheritdoc/>
        public override string ToString()
        {
            return Key;
        }

        // The underlying file part(s) backing this entry; multipart formats may have several.
        internal abstract IEnumerable<FilePart> Parts { get; }

        // True when the entry belongs to a solid block (compression state shared across entries).
        internal bool IsSolid { get; set; }

        internal virtual void Close()
        {
        }

        /// <summary>
        /// Entry file attributes; not every format records them, hence the default throws.
        /// </summary>
        public virtual int? Attrib => throw new NotImplementedException();
    }
}
using System;
using System.IO;
using SharpCompress.Readers;

namespace SharpCompress.Common
{
    /// <summary>
    /// Forward-only stream over a single archive entry. Disposing the stream drains any
    /// unread remainder so the underlying reader stays positioned at the next entry.
    /// </summary>
    public class EntryStream : Stream
    {
        private readonly IReader _reader;
        private readonly Stream _stream;
        private bool _completed;
        private bool _isDisposed;

        internal EntryStream(IReader reader, Stream stream)
        {
            _reader = reader;
            _stream = stream;
        }

        /// <summary>
        /// When reading a stream from OpenEntryStream, the stream must be completed, so use
        /// this to finish reading the entire entry without copying its contents.
        /// </summary>
        public void SkipEntry()
        {
            this.Skip();
            _completed = true;
        }

        protected override void Dispose(bool disposing)
        {
            // Drain the remainder unless the entry was fully read or the reader was cancelled.
            if (!(_completed || _reader.Cancelled))
            {
                SkipEntry();
            }
            if (_isDisposed)
            {
                return;
            }
            _isDisposed = true;
            base.Dispose(disposing);
            _stream.Dispose();
        }

        public override bool CanRead => true;

        public override bool CanSeek => false;

        public override bool CanWrite => false;

        public override void Flush()
        {
        }

        public override long Length => _stream.Length;

        public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

        public override int Read(byte[] buffer, int offset, int count)
        {
            int read = _stream.Read(buffer, offset, count);
            if (read <= 0)
            {
                // End of entry reached; nothing left to skip on dispose.
                _completed = true;
            }
            return read;
        }

        public override int ReadByte()
        {
            int value = _stream.ReadByte();
            if (value == -1)
            {
                _completed = true;
            }
            return value;
        }

        public override long Seek(long offset, SeekOrigin origin)
        {
            throw new NotSupportedException();
        }

        public override void SetLength(long value)
        {
            throw new NotSupportedException();
        }

        public override void Write(byte[] buffer, int offset, int count)
        {
            throw new NotSupportedException();
        }
    }

    /// <summary>Base exception for errors raised while extracting an entry.</summary>
    public class ExtractionException : Exception
    {
        public ExtractionException(string message)
            : base(message)
        {
        }

        public ExtractionException(string message, Exception inner)
            : base(message, inner)
        {
        }
    }
}
using System;
using System.IO;

namespace SharpCompress.Common
{
    internal static class ExtractionMethods
    {
#if !NO_FILE
        /// <summary>
        /// Returns true when candidate is root itself or lies underneath it. The prefix test
        /// is done against root with a trailing separator so sibling directories that merely
        /// share a name prefix (e.g. "/dest" vs "/dest-evil") are rejected.
        /// </summary>
        private static bool IsWithinRoot(string candidate, string root)
        {
            if (string.Equals(candidate, root, StringComparison.Ordinal))
            {
                return true;
            }
            string rootWithSeparator = root.EndsWith(Path.DirectorySeparatorChar.ToString(), StringComparison.Ordinal)
                                           ? root
                                           : root + Path.DirectorySeparatorChar;
            return candidate.StartsWith(rootWithSeparator, StringComparison.Ordinal);
        }

        /// <summary>
        /// Extract to a specific directory, retaining the entry's filename. Guards against
        /// path traversal ("zip-slip"): entries may not escape destinationDirectory.
        /// </summary>
        public static void WriteEntryToDirectory(IEntry entry, string destinationDirectory,
                                                 ExtractionOptions options,
                                                 Action<string, ExtractionOptions> write)
        {
            string destinationFileName;
            string file = Path.GetFileName(entry.Key);
            string fullDestinationDirectoryPath = Path.GetFullPath(destinationDirectory);

            options = options ?? new ExtractionOptions
            {
                Overwrite = true
            };

            if (options.ExtractFullPath)
            {
                string folder = Path.GetDirectoryName(entry.Key);
                string destdir = Path.GetFullPath(Path.Combine(fullDestinationDirectoryPath, folder));

                if (!Directory.Exists(destdir))
                {
                    if (!IsWithinRoot(destdir, fullDestinationDirectoryPath))
                    {
                        throw new ExtractionException("Entry is trying to create a directory outside of the destination directory.");
                    }

                    Directory.CreateDirectory(destdir);
                }
                destinationFileName = Path.Combine(destdir, file);
            }
            else
            {
                destinationFileName = Path.Combine(fullDestinationDirectoryPath, file);
            }

            if (!entry.IsDirectory)
            {
                destinationFileName = Path.GetFullPath(destinationFileName);

                if (!IsWithinRoot(destinationFileName, fullDestinationDirectoryPath))
                {
                    throw new ExtractionException("Entry is trying to write a file outside of the destination directory.");
                }
                write(destinationFileName, options);
            }
            else if (options.ExtractFullPath && !Directory.Exists(destinationFileName))
            {
                Directory.CreateDirectory(destinationFileName);
            }
        }

        /// <summary>
        /// Extract to a specific file. Symlinks are written through the delegate supplied in
        /// ExtractionOptions; regular files through openAndWrite with the proper FileMode.
        /// </summary>
        public static void WriteEntryToFile(IEntry entry, string destinationFileName,
                                            ExtractionOptions options,
                                            Action<string, FileMode> openAndWrite)
        {
            if (entry.LinkTarget != null)
            {
                if (null == options.WriteSymbolicLink)
                {
                    throw new ExtractionException("Entry is a symbolic link but ExtractionOptions.WriteSymbolicLink delegate is null");
                }
                options.WriteSymbolicLink(destinationFileName, entry.LinkTarget);
            }
            else
            {
                FileMode fm = FileMode.Create;
                options = options ?? new ExtractionOptions
                {
                    Overwrite = true
                };

                if (!options.Overwrite)
                {
                    // CreateNew throws if the file already exists rather than truncating it.
                    fm = FileMode.CreateNew;
                }

                openAndWrite(destinationFileName, fm);
                entry.PreserveExtractionOptions(destinationFileName, options);
            }
        }
#endif
    }

    /// <summary>Options controlling how entries are written to disk.</summary>
    public class ExtractionOptions
    {
        /// <summary>
        /// overwrite target if it exists
        /// </summary>
        public bool Overwrite { get; set; }

        /// <summary>
        /// extract with internal directory structure
        /// </summary>
        public bool ExtractFullPath { get; set; }

        /// <summary>
        /// preserve file time
        /// </summary>
        public bool PreserveFileTime { get; set; }

        /// <summary>
        /// preserve windows file attributes
        /// </summary>
        public bool PreserveAttributes { get; set; }

        /// <summary>
        /// Delegate for writing symbolic links to disk.
        /// sourcePath is where the symlink is created.
        /// targetPath is what the symlink refers to.
        /// </summary>
        public delegate void SymbolicLinkWriterDelegate(string sourcePath, string targetPath);

        public SymbolicLinkWriterDelegate WriteSymbolicLink;
    }

    /// <summary>One physical piece of an entry's data; an entry may span several volumes.</summary>
    public abstract class FilePart
    {
        protected FilePart(ArchiveEncoding archiveEncoding)
        {
            ArchiveEncoding = archiveEncoding;
        }

        internal ArchiveEncoding ArchiveEncoding { get; }

        internal abstract string FilePartName { get; }

        internal abstract Stream GetCompressedStream();
        internal abstract Stream GetRawStream();
        internal bool Skipped { get; set; }
    }

    public class FilePartExtractionBeginEventArgs : EventArgs
    {
        /// <summary>
        /// File name for the part for the current entry
        /// </summary>
        public string Name { get; internal set; }

        /// <summary>
        /// Uncompressed size of the current entry in the part
        /// </summary>
        public long Size { get; internal set; }

        /// <summary>
        /// Compressed size of the current entry in the part
        /// </summary>
        public long CompressedSize { get; internal set; }
    }
}
using System;

namespace SharpCompress.Common
{
    /// <summary>
    /// Helpers for testing and setting flags on integral bit fields and enum values.
    /// </summary>
    internal static class FlagUtility
    {
        /// <summary>
        /// Returns true if the flag is set on the specified bit field.
        /// </summary>
        /// <typeparam name="T">Enumeration with Flags attribute</typeparam>
        /// <param name="bitField">Flagged variable</param>
        /// <param name="flag">Flag to test</param>
        public static bool HasFlag<T>(long bitField, T flag)
            where T : struct
        {
            // Convert explicitly: calling HasFlag(bitField, flag) here would rebind to this
            // same generic overload (T does not implicitly convert to long) and recurse forever.
            return HasFlag(bitField, Convert.ToInt64(flag));
        }

        /// <summary>
        /// Returns true if the flag is set on the specified unsigned bit field.
        /// </summary>
        /// <typeparam name="T">Enumeration with Flags attribute</typeparam>
        /// <param name="bitField">Flagged variable</param>
        /// <param name="flag">Flag to test</param>
        public static bool HasFlag<T>(ulong bitField, T flag)
            where T : struct
        {
            // Same recursion hazard as above; route through the (ulong, ulong) overload.
            return HasFlag(bitField, Convert.ToUInt64(flag));
        }

        /// <summary>
        /// Returns true if the flag is set on the specified bit field.
        /// </summary>
        /// <param name="bitField">Flagged variable</param>
        /// <param name="flag">Flag to test</param>
        public static bool HasFlag(ulong bitField, ulong flag)
        {
            return ((bitField & flag) == flag);
        }

        public static bool HasFlag(short bitField, short flag)
        {
            return ((bitField & flag) == flag);
        }

        /// <summary>
        /// Returns true if the flag is set on the specified enum bit field.
        /// </summary>
        /// <typeparam name="T">Enumeration with Flags attribute</typeparam>
        /// <param name="bitField">Flagged variable</param>
        /// <param name="flag">Flag to test</param>
        public static bool HasFlag<T>(T bitField, T flag)
            where T : struct
        {
            return HasFlag(Convert.ToInt64(bitField), Convert.ToInt64(flag));
        }

        /// <summary>
        /// Returns true if the flag is set on the specified bit field.
        /// </summary>
        /// <param name="bitField">Flagged variable</param>
        /// <param name="flag">Flag to test</param>
        public static bool HasFlag(long bitField, long flag)
        {
            return ((bitField & flag) == flag);
        }

        /// <summary>
        /// Sets a bit-field to either on or off for the specified flag.
        /// </summary>
        /// <param name="bitField">Flagged variable</param>
        /// <param name="flag">Flag to change</param>
        /// <param name="on">bool</param>
        /// <returns>The flagged variable with the flag changed</returns>
        public static long SetFlag(long bitField, long flag, bool on)
        {
            if (on)
            {
                return bitField | flag;
            }
            return bitField & (~flag);
        }

        /// <summary>
        /// Sets a bit-field to either on or off for the specified flag.
        /// </summary>
        /// <typeparam name="T">Enumeration with Flags attribute</typeparam>
        /// <param name="bitField">Flagged variable</param>
        /// <param name="flag">Flag to change</param>
        /// <param name="on">bool</param>
        /// <returns>The flagged variable with the flag changed</returns>
        public static long SetFlag<T>(T bitField, T flag, bool on)
            where T : struct
        {
            return SetFlag(Convert.ToInt64(bitField), Convert.ToInt64(flag), on);
        }
    }
}
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;

namespace SharpCompress.Common.GZip
{
    /// <summary>
    /// The single entry of a gzip member. gzip stores sizes and CRC only in the trailer,
    /// which is not read up front, so Crc / CompressedSize / Size are reported as 0.
    /// </summary>
    public class GZipEntry : Entry
    {
        private readonly GZipFilePart _filePart;

        internal GZipEntry(GZipFilePart filePart)
        {
            _filePart = filePart;
        }

        public override CompressionType CompressionType => CompressionType.GZip;

        // CRC32 lives in the gzip trailer; not available when the entry is created.
        public override long Crc => 0;

        public override string Key => _filePart.FilePartName;

        public override string LinkTarget => null;

        public override long CompressedSize => 0;

        public override long Size => 0;

        public override DateTime? LastModifiedTime => _filePart.DateModified;

        public override DateTime? CreatedTime => null;

        public override DateTime? LastAccessedTime => null;

        public override DateTime? ArchivedTime => null;

        public override bool IsEncrypted => false;

        public override bool IsDirectory => false;

        public override bool IsSplitAfter => false;

        internal override IEnumerable<FilePart> Parts => _filePart.AsEnumerable();

        internal static IEnumerable<GZipEntry> GetEntries(Stream stream, OptionsBase options)
        {
            // A gzip stream contains exactly one member.
            yield return new GZipEntry(new GZipFilePart(stream, options.ArchiveEncoding));
        }
    }
}
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Converters;
using SharpCompress.Readers;
using System.Text;

namespace SharpCompress.Common.GZip
{
    /// <summary>
    /// Parses the gzip member header (RFC 1952) and exposes the deflate payload that follows it.
    /// </summary>
    internal class GZipFilePart : FilePart
    {
        private string _name;
        private readonly Stream _stream;

        internal GZipFilePart(Stream stream, ArchiveEncoding archiveEncoding)
            : base(archiveEncoding)
        {
            ReadAndValidateGzipHeader(stream);
            EntryStartPosition = stream.Position;
            _stream = stream;
        }

        /// <summary>Stream position of the first payload byte, immediately after the header.</summary>
        internal long EntryStartPosition { get; }

        /// <summary>MTIME field of the header: seconds since the Unix epoch.</summary>
        internal DateTime? DateModified { get; private set; }

        internal override string FilePartName => _name;

        internal override Stream GetCompressedStream()
        {
            return new DeflateStream(_stream, CompressionMode.Decompress, CompressionLevel.Default);
        }

        internal override Stream GetRawStream()
        {
            return _stream;
        }

        private void ReadAndValidateGzipHeader(Stream stream)
        {
            // Fixed 10-byte header: magic(2), CM, FLG, MTIME(4), XFL, OS.
            byte[] header = new byte[10];
            int n = stream.Read(header, 0, header.Length);

            // workitem 8501: handle edge case (decompress empty stream)
            if (n == 0)
            {
                return;
            }

            if (n != 10)
            {
                throw new ZlibException("Not a valid GZIP stream.");
            }

            if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8)
            {
                throw new ZlibException("Bad GZIP header.");
            }

            Int32 timet = DataConverter.LittleEndian.GetInt32(header, 4);
            DateModified = TarHeader.EPOCH.AddSeconds(timet);
            if ((header[3] & 0x04) == 0x04)
            {
                // FEXTRA: read and discard the extra field (2-byte little-endian length prefix).
                n = stream.Read(header, 0, 2);

                Int16 extraLength = (Int16)(header[0] + header[1] * 256);
                byte[] extra = new byte[extraLength];

                if (!stream.ReadFully(extra))
                {
                    throw new ZlibException("Unexpected end-of-file reading GZIP header.");
                }
                n = extraLength;
            }
            if ((header[3] & 0x08) == 0x08)
            {
                // FNAME: zero-terminated original file name.
                _name = ReadZeroTerminatedString(stream);
            }
            if ((header[3] & 0x10) == 0x010)
            {
                // FCOMMENT: zero-terminated comment; discard.
                ReadZeroTerminatedString(stream);
            }
            if ((header[3] & 0x02) == 0x02)
            {
                // FHCRC is a CRC16, i.e. TWO bytes per RFC 1952; consuming only one (as the
                // old code did) leaves the deflate stream misaligned by a byte. Read both.
                stream.ReadByte();
                stream.ReadByte();
            }
        }

        private string ReadZeroTerminatedString(Stream stream)
        {
            byte[] buf1 = new byte[1];
            var list = new List<byte>();
            bool done = false;
            do
            {
                // workitem 7740
                int n = stream.Read(buf1, 0, 1);
                if (n != 1)
                {
                    throw new ZlibException("Unexpected EOF reading GZIP header.");
                }
                if (buf1[0] == 0)
                {
                    done = true;
                }
                else
                {
                    list.Add(buf1[0]);
                }
            }
            while (!done);
            byte[] buffer = list.ToArray();
            return ArchiveEncoding.Decode(buffer);
        }
    }

    /// <summary>Single-file volume wrapper for gzip archives.</summary>
    public class GZipVolume : Volume
    {
        public GZipVolume(Stream stream, ReaderOptions options)
            : base(stream, options)
        {
        }

#if !NO_FILE
        public GZipVolume(FileInfo fileInfo, ReaderOptions options)
            : base(fileInfo.OpenRead(), options)
        {
            // We opened the FileStream ourselves, so the volume must close it.
            options.LeaveStreamOpen = false;
        }
#endif

        public override bool IsFirstVolume => true;

        public override bool IsMultiVolume => true;
    }
}
using System;

namespace SharpCompress.Common
{
    /// <summary>Format-independent description of a single archive entry.</summary>
    public interface IEntry
    {
        CompressionType CompressionType { get; }
        DateTime? ArchivedTime { get; }
        long CompressedSize { get; }
        long Crc { get; }
        DateTime? CreatedTime { get; }
        string Key { get; }
        string LinkTarget { get; }
        bool IsDirectory { get; }
        bool IsEncrypted { get; }
        bool IsSplitAfter { get; }
        DateTime? LastAccessedTime { get; }
        DateTime? LastModifiedTime { get; }
        long Size { get; }
        int? Attrib { get; }
    }

    /// <summary>Callbacks fired while an entry is being extracted.</summary>
    internal interface IExtractionListener
    {
        void FireFilePartExtractionBegin(string name, long size, long compressedSize);
        void FireCompressedBytesRead(long currentPartCompressedBytes, long compressedReadBytes);
    }

    /// <summary>A single archive volume; disposing it releases the underlying stream when owned.</summary>
    public interface IVolume : IDisposable
    {
    }
}

#if !NO_FILE
namespace SharpCompress.Common
{
    internal static class EntryExtensions
    {
        /// <summary>
        /// Applies PreserveFileTime / PreserveAttributes from the options to the freshly
        /// written file at destinationFileName. No-op when neither option is requested or
        /// the file does not exist.
        /// </summary>
        internal static void PreserveExtractionOptions(this IEntry entry, string destinationFileName,
                                                       ExtractionOptions options)
        {
            if (options.PreserveFileTime || options.PreserveAttributes)
            {
                System.IO.FileInfo nf = new System.IO.FileInfo(destinationFileName);
                if (!nf.Exists)
                {
                    return;
                }

                // Update file times to the original packed times where the archive recorded them.
                if (options.PreserveFileTime)
                {
                    if (entry.CreatedTime.HasValue)
                    {
                        nf.CreationTime = entry.CreatedTime.Value;
                    }

                    if (entry.LastModifiedTime.HasValue)
                    {
                        nf.LastWriteTime = entry.LastModifiedTime.Value;
                    }

                    if (entry.LastAccessedTime.HasValue)
                    {
                        nf.LastAccessTime = entry.LastAccessedTime.Value;
                    }
                }

                if (options.PreserveAttributes)
                {
                    if (entry.Attrib.HasValue)
                    {
                        nf.Attributes = (System.IO.FileAttributes)Enum.ToObject(typeof(System.IO.FileAttributes), entry.Attrib.Value);
                    }
                }
            }
        }
    }
}
#endif
using System;

namespace SharpCompress.Common
{
    /// <summary>Thrown when an archive ends before all expected data could be read.</summary>
    public class IncompleteArchiveException : ArchiveException
    {
        public IncompleteArchiveException(string message)
            : base(message)
        {
        }
    }

    /// <summary>Thrown when the data being read does not match the expected archive format.</summary>
    public class InvalidFormatException : ExtractionException
    {
        public InvalidFormatException(string message)
            : base(message)
        {
        }

        public InvalidFormatException(string message, Exception inner)
            : base(message, inner)
        {
        }
    }

    /// <summary>Thrown when extraction needs additional volumes that were not supplied.</summary>
    public class MultiVolumeExtractionException : ExtractionException
    {
        public MultiVolumeExtractionException(string message)
            : base(message)
        {
        }

        public MultiVolumeExtractionException(string message, Exception inner)
            : base(message, inner)
        {
        }
    }
}
using System;

namespace SharpCompress.Common
{
    /// <summary>Thrown when a multipart archive is read from a source that cannot supply all parts.</summary>
    public class MultipartStreamRequiredException : ExtractionException
    {
        public MultipartStreamRequiredException(string message)
            : base(message)
        {
        }
    }

    /// <summary>Options common to readers and writers.</summary>
    public class OptionsBase
    {
        /// <summary>
        /// SharpCompress will keep the supplied streams open. Default is true.
        /// </summary>
        public bool LeaveStreamOpen { get; set; } = true;

        /// <summary>Encodings used for entry names, comments and passwords.</summary>
        public ArchiveEncoding ArchiveEncoding { get; set; } = new ArchiveEncoding();
    }

    /// <summary>Thrown when an entry is password protected/encrypted and cannot be extracted.</summary>
    public class PasswordProtectedException : ExtractionException
    {
        public PasswordProtectedException(string message)
            : base(message)
        {
        }

        public PasswordProtectedException(string message, Exception inner)
            : base(message, inner)
        {
        }
    }
}
using SharpCompress.IO;

namespace SharpCompress.Common.Rar.Headers
{
    /// <summary>Rar4 authenticity-verification (AV) header; never present in Rar5 archives.</summary>
    internal class AvHeader : RarHeader
    {
        public AvHeader(RarHeader header, RarCrcBinaryReader reader)
            : base(header, reader, HeaderType.Av)
        {
            if (IsRar5)
            {
                throw new InvalidFormatException("unexpected rar5 record");
            }
        }

        protected override void ReadFinish(MarkingBinaryReader reader)
        {
            UnpackVersion = reader.ReadByte();
            Method = reader.ReadByte();
            AvVersion = reader.ReadByte();
            AvInfoCrc = reader.ReadInt32();
        }

        internal int AvInfoCrc { get; private set; }

        internal byte UnpackVersion { get; private set; }

        internal byte Method { get; private set; }

        internal byte AvVersion { get; private set; }
    }

    /// <summary>
    /// Rar5 archive encryption header: carries the PBKDF2 parameters, salt and optional
    /// password-check values needed to decrypt the remaining headers.
    /// </summary>
    internal class ArchiveCryptHeader : RarHeader
    {
        private const int CRYPT_VERSION = 0; // Supported encryption version.
        private const int SIZE_SALT50 = 16;
        private const int SIZE_SALT30 = 8;
        private const int SIZE_INITV = 16;
        private const int SIZE_PSWCHECK = 8;
        private const int SIZE_PSWCHECK_CSUM = 4;
        private const int CRYPT5_KDF_LG2_COUNT = 15; // LOG2 of PBKDF2 iteration count.
        private const int CRYPT5_KDF_LG2_COUNT_MAX = 24; // LOG2 of maximum accepted iteration count.

        private bool _usePswCheck;
        private uint _lg2Count; // Log2 of PBKDF2 repetition count.
        private byte[] _salt;
        private byte[] _pswCheck;
        private byte[] _pswCheckCsm;

        public ArchiveCryptHeader(RarHeader header, RarCrcBinaryReader reader)
            : base(header, reader, HeaderType.Crypt)
        {
        }

        protected override void ReadFinish(MarkingBinaryReader reader)
        {
            var cryptVersion = reader.ReadRarVIntUInt32();
            if (cryptVersion > CRYPT_VERSION)
            {
                // Unknown future encryption version: leave fields unset rather than misparse.
                return;
            }
            var encryptionFlags = reader.ReadRarVIntUInt32();
            _usePswCheck = FlagUtility.HasFlag(encryptionFlags, EncryptionFlagsV5.CHFL_CRYPT_PSWCHECK);
            _lg2Count = reader.ReadRarVIntByte(1);

            if (_lg2Count > CRYPT5_KDF_LG2_COUNT_MAX)
            {
                // Iteration count outside the accepted range: bail out without reading further.
                return;
            }

            _salt = reader.ReadBytes(SIZE_SALT50);
            if (_usePswCheck)
            {
                _pswCheck = reader.ReadBytes(SIZE_PSWCHECK);
                _pswCheckCsm = reader.ReadBytes(SIZE_PSWCHECK_CSUM);
            }
        }
    }
}
EncryptionVersion = reader.ReadByte(); + } + } + } + + private void ReadLocator(MarkingBinaryReader reader) { + var size = reader.ReadRarVIntUInt16(); + var type = reader.ReadRarVIntUInt16(); + if (type != 1) throw new InvalidFormatException("expected locator record"); + var flags = reader.ReadRarVIntUInt16(); + const ushort hasQuickOpenOffset = 0x01; + const ushort hasRecoveryOffset = 0x02; + ulong quickOpenOffset = 0; + if ((flags & hasQuickOpenOffset) == hasQuickOpenOffset) { + quickOpenOffset = reader.ReadRarVInt(); + } + ulong recoveryOffset = 0; + if ((flags & hasRecoveryOffset) == hasRecoveryOffset) { + recoveryOffset = reader.ReadRarVInt(); + } + } + + private ushort Flags { get; set; } + + private bool HasFlag(ushort flag) + { + return (Flags & flag) == flag; + } + + internal int? VolumeNumber { get; private set; } + + internal short? HighPosAv { get; private set; } + + internal int? PosAv { get; private set; } + + private byte? EncryptionVersion { get; set; } + + public bool? IsEncrypted => IsRar5 ? (bool?)null : HasFlag(ArchiveFlagsV4.PASSWORD); + + public bool OldNumberingFormat => !IsRar5 && !HasFlag(ArchiveFlagsV4.NEW_NUMBERING); + + public bool IsVolume => HasFlag(IsRar5 ? ArchiveFlagsV5.VOLUME : ArchiveFlagsV4.VOLUME); + + // RAR5: Volume number field is present. True for all volumes except first. + public bool IsFirstVolume => IsRar5 ? VolumeNumber == null : HasFlag(ArchiveFlagsV4.FIRST_VOLUME); + + public bool IsSolid => HasFlag(IsRar5 ? 
ArchiveFlagsV5.SOLID : ArchiveFlagsV4.SOLID); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/CommentHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/CommentHeader.cs new file mode 100644 index 0000000000..4845ce2bd5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/CommentHeader.cs @@ -0,0 +1,28 @@ +using SharpCompress.IO; + +namespace SharpCompress.Common.Rar.Headers +{ + internal class CommentHeader : RarHeader + { + protected CommentHeader(RarHeader header, RarCrcBinaryReader reader) + : base(header, reader, HeaderType.Comment) + { + if (IsRar5) throw new InvalidFormatException("unexpected rar5 record"); + } + + protected override void ReadFinish(MarkingBinaryReader reader) + { + UnpSize = reader.ReadInt16(); + UnpVersion = reader.ReadByte(); + UnpMethod = reader.ReadByte(); + CommCrc = reader.ReadInt16(); + } + + internal short UnpSize { get; private set; } + + internal byte UnpVersion { get; private set; } + + internal byte UnpMethod { get; private set; } + internal short CommCrc { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/EndArchiveHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/EndArchiveHeader.cs new file mode 100644 index 0000000000..926a8dfe19 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/EndArchiveHeader.cs @@ -0,0 +1,43 @@ +using SharpCompress.IO; + +namespace SharpCompress.Common.Rar.Headers +{ + internal class EndArchiveHeader : RarHeader + { + public EndArchiveHeader(RarHeader header, RarCrcBinaryReader reader) + : base(header, reader, HeaderType.EndArchive) + { + } + + protected override void ReadFinish(MarkingBinaryReader reader) + { + if (IsRar5) + { + Flags = reader.ReadRarVIntUInt16(); + } + else + { + Flags = HeaderFlags; + if (HasFlag(EndArchiveFlagsV4.DATA_CRC)) + { + ArchiveCrc = reader.ReadInt32(); + } + if 
(HasFlag(EndArchiveFlagsV4.VOLUME_NUMBER)) + { + VolumeNumber = reader.ReadInt16(); + } + } + } + + private ushort Flags { get; set; } + + private bool HasFlag(ushort flag) + { + return (Flags & flag) == flag; + } + + internal int? ArchiveCrc { get; private set; } + + internal short? VolumeNumber { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileHeader.cs new file mode 100644 index 0000000000..09ded0fc62 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileHeader.cs @@ -0,0 +1,452 @@ +#if !Rar2017_64bit +using nint = System.Int32; +using nuint = System.UInt32; +using size_t = System.UInt32; +#else +using nint = System.Int64; +using nuint = System.UInt64; +using size_t = System.UInt64; +#endif + +using SharpCompress.IO; +using System; +using System.IO; +using System.Text; + +namespace SharpCompress.Common.Rar.Headers +{ + internal class FileHeader : RarHeader + { + private uint _fileCrc; + + public FileHeader(RarHeader header, RarCrcBinaryReader reader, HeaderType headerType) + : base(header, reader, headerType) + { + } + + protected override void ReadFinish(MarkingBinaryReader reader) + { + if (IsRar5) + { + ReadFromReaderV5(reader); + } + else + { + ReadFromReaderV4(reader); + } + } + + private void ReadFromReaderV5(MarkingBinaryReader reader) + { + Flags = reader.ReadRarVIntUInt16(); + + var lvalue = checked((long)reader.ReadRarVInt()); + + // long.MaxValue causes the unpack code to finish when the input stream is exhausted + UncompressedSize = HasFlag(FileFlagsV5.UNPACKED_SIZE_UNKNOWN) ? 
long.MaxValue : lvalue; + + FileAttributes = reader.ReadRarVIntUInt32(); + + if (HasFlag(FileFlagsV5.HAS_MOD_TIME)) { + FileLastModifiedTime = Utility.UnixTimeToDateTime(reader.ReadUInt32()); + } + + if (HasFlag(FileFlagsV5.HAS_CRC32)) { + FileCrc = reader.ReadUInt32(); + } + + var compressionInfo = reader.ReadRarVIntUInt16(); + + // Lower 6 bits (0x003f mask) contain the version of compression algorithm, resulting in possible 0 - 63 values. Current version is 0. + // "+ 50" to not mix with old RAR format algorithms. For example, + // we may need to use the compression algorithm 15 in the future, + // but it was already used in RAR 1.5 and Unpack needs to distinguish + // them. + CompressionAlgorithm = (byte)((compressionInfo & 0x3f) + 50); + + // 7th bit (0x0040) defines the solid flag. If it is set, RAR continues to use the compression dictionary left after processing preceding files. + // It can be set only for file headers and is never set for service headers. + IsSolid = (compressionInfo & 0x40) == 0x40; + + // Bits 8 - 10 (0x0380 mask) define the compression method. Currently only values 0 - 5 are used. 0 means no compression. + CompressionMethod = (byte)((compressionInfo >> 7) & 0x7); + + // Bits 11 - 14 (0x3c00) define the minimum size of dictionary size required to extract data. Value 0 means 128 KB, 1 - 256 KB, ..., 14 - 2048 MB, 15 - 4096 MB. + WindowSize = IsDirectory ? 0 : ((size_t)0x20000) << ((compressionInfo>>10) & 0xf); + + HostOs = reader.ReadRarVIntByte(); + + var nameSize = reader.ReadRarVIntUInt16(); + + // Variable length field containing Name length bytes in UTF-8 format without trailing zero. + // For file header this is a name of archived file. Forward slash character is used as the path separator both for Unix and Windows names. + // Backslashes are treated as a part of name for Unix names and as invalid character for Windows file names. Type of name is defined by Host OS field. 
+ // + // TODO: not sure if anything needs to be done to handle the following: + // If Unix file name contains any high ASCII characters which cannot be correctly converted to Unicode and UTF-8 + // we map such characters to to 0xE080 - 0xE0FF private use Unicode area and insert 0xFFFE Unicode non-character + // to resulting string to indicate that it contains mapped characters, which need to be converted back when extracting. + // Concrete position of 0xFFFE is not defined, we need to search the entire string for it. Such mapped names are not + // portable and can be correctly unpacked only on the same system where they were created. + // + // For service header this field contains a name of service header. Now the following names are used: + // CMT Archive comment + // QO Archive quick open data + // ACL NTFS file permissions + // STM NTFS alternate data stream + // RR Recovery record + var b = reader.ReadBytes(nameSize); + FileName = ConvertPathV5(Encoding.UTF8.GetString(b, 0, b.Length)); + + // extra size seems to be redudant since we know the total header size + if (ExtraSize != RemainingHeaderBytes(reader)) + { + throw new InvalidFormatException("rar5 header size / extra size inconsistency"); + } + + isEncryptedRar5 = false; + + while (RemainingHeaderBytes(reader) > 0) { + var size = reader.ReadRarVIntUInt16(); + int n = RemainingHeaderBytes(reader); + var type = reader.ReadRarVIntUInt16(); + switch (type) { + //TODO + case 1: // file encryption + { + isEncryptedRar5 = true; + + //var version = reader.ReadRarVIntByte(); + //if (version != 0) throw new InvalidFormatException("unknown encryption algorithm " + version); + } + break; + // case 2: // file hash + // { + // + // } + // break; + case 3: // file time + { + ushort flags = reader.ReadRarVIntUInt16(); + var isWindowsTime = (flags & 1) == 0; + if ((flags & 0x2) == 0x2) { + FileLastModifiedTime = ReadExtendedTimeV5(reader, isWindowsTime); + } + if ((flags & 0x4) == 0x4) { + FileCreatedTime = 
ReadExtendedTimeV5(reader, isWindowsTime); + } + if ((flags & 0x8) == 0x8) { + FileLastAccessedTime = ReadExtendedTimeV5(reader, isWindowsTime); + } + } + break; +//TODO +// case 4: // file version +// { +// +// } +// break; +// case 5: // file system redirection +// { +// +// } +// break; +// case 6: // unix owner +// { +// +// } +// break; +// case 7: // service data +// { +// +// } +// break; + + default: + // skip unknown record types to allow new record types to be added in the future + break; + } + // drain any trailing bytes of extra record + int did = n - RemainingHeaderBytes(reader); + int drain = size - did; + if (drain > 0) + { + reader.ReadBytes(drain); + } + } + + if (AdditionalDataSize != 0) { + CompressedSize = AdditionalDataSize; + } + } + + + private static DateTime ReadExtendedTimeV5(MarkingBinaryReader reader, bool isWindowsTime) + { + if (isWindowsTime) + { + return DateTime.FromFileTime(reader.ReadInt64()); + } + else + { + return Utility.UnixTimeToDateTime(reader.ReadUInt32()); + } + } + + private static string ConvertPathV5(string path) + { +#if NO_FILE + // not sure what to do here + throw new NotImplementedException("TODO"); +#else + if (Path.DirectorySeparatorChar == '\\') + { + // replace embedded \\ with valid filename char + return path.Replace('\\', '-').Replace('/', '\\'); + } + return path; +#endif + } + + + private void ReadFromReaderV4(MarkingBinaryReader reader) + { + Flags = HeaderFlags; + IsSolid = HasFlag(FileFlagsV4.SOLID); + WindowSize = IsDirectory ? 
0U : ((size_t)0x10000) << ((Flags & FileFlagsV4.WINDOW_MASK) >> 5); + + uint lowUncompressedSize = reader.ReadUInt32(); + + HostOs = reader.ReadByte(); + + FileCrc = reader.ReadUInt32(); + + FileLastModifiedTime = Utility.DosDateToDateTime(reader.ReadUInt32()); + + CompressionAlgorithm = reader.ReadByte(); + CompressionMethod = (byte)(reader.ReadByte() - 0x30); + + short nameSize = reader.ReadInt16(); + + FileAttributes = reader.ReadUInt32(); + + uint highCompressedSize = 0; + uint highUncompressedkSize = 0; + if (HasFlag(FileFlagsV4.LARGE)) + { + highCompressedSize = reader.ReadUInt32(); + highUncompressedkSize = reader.ReadUInt32(); + } + else + { + if (lowUncompressedSize == 0xffffffff) + { + lowUncompressedSize = 0xffffffff; + highUncompressedkSize = int.MaxValue; + } + } + CompressedSize = UInt32To64(highCompressedSize, checked((uint)AdditionalDataSize)); + UncompressedSize = UInt32To64(highUncompressedkSize, lowUncompressedSize); + + nameSize = nameSize > 4 * 1024 ? (short)(4 * 1024) : nameSize; + + byte[] fileNameBytes = reader.ReadBytes(nameSize); + + const int saltSize = 8; + const int newLhdSize = 32; + + switch (HeaderCode) + { + case HeaderCodeV.RAR4_FILE_HEADER: + { + if (HasFlag(FileFlagsV4.UNICODE)) + { + int length = 0; + while (length < fileNameBytes.Length + && fileNameBytes[length] != 0) + { + length++; + } + if (length != nameSize) + { + length++; + FileName = FileNameDecoder.Decode(fileNameBytes, length); + } + else + { + FileName = ArchiveEncoding.Decode(fileNameBytes); + } + } + else + { + FileName = ArchiveEncoding.Decode(fileNameBytes); + } + FileName = ConvertPathV4(FileName); + } + break; + case HeaderCodeV.RAR4_NEW_SUB_HEADER: + { + int datasize = HeaderSize - newLhdSize - nameSize; + if (HasFlag(FileFlagsV4.SALT)) + { + datasize -= saltSize; + } + if (datasize > 0) + { + SubData = reader.ReadBytes(datasize); + } + + if (NewSubHeaderType.SUBHEAD_TYPE_RR.Equals(fileNameBytes)) + { + RecoverySectors = SubData[8] + (SubData[9] << 8) + + 
(SubData[10] << 16) + (SubData[11] << 24); + } + } + break; + } + + if (HasFlag(FileFlagsV4.SALT)) + { + R4Salt = reader.ReadBytes(saltSize); + } + if (HasFlag(FileFlagsV4.EXT_TIME)) + { + // verify that the end of the header hasn't been reached before reading the Extended Time. + // some tools incorrectly omit Extended Time despite specifying FileFlags.EXTTIME, which most parsers tolerate. + if (RemainingHeaderBytes(reader) >= 2) + { + ushort extendedFlags = reader.ReadUInt16(); + FileLastModifiedTime = ProcessExtendedTimeV4(extendedFlags, FileLastModifiedTime, reader, 0); + FileCreatedTime = ProcessExtendedTimeV4(extendedFlags, null, reader, 1); + FileLastAccessedTime = ProcessExtendedTimeV4(extendedFlags, null, reader, 2); + FileArchivedTime = ProcessExtendedTimeV4(extendedFlags, null, reader, 3); + } + } + } + + private static long UInt32To64(uint x, uint y) + { + long l = x; + l <<= 32; + return l + y; + } + + private static DateTime? ProcessExtendedTimeV4(ushort extendedFlags, DateTime? 
time, MarkingBinaryReader reader, int i) + { + uint rmode = (uint)extendedFlags >> (3 - i) * 4; + if ((rmode & 8) == 0) + { + return null; + } + if (i != 0) + { + uint dosTime = reader.ReadUInt32(); + time = Utility.DosDateToDateTime(dosTime); + } + if ((rmode & 4) == 0) + { + time = time.Value.AddSeconds(1); + } + uint nanosecondHundreds = 0; + int count = (int)rmode & 3; + for (int j = 0; j < count; j++) + { + byte b = reader.ReadByte(); + nanosecondHundreds |= (((uint)b) << ((j + 3 - count) * 8)); + } + + //10^-7 to 10^-3 + return time.Value.AddMilliseconds(nanosecondHundreds * Math.Pow(10, -4)); + } + + private static string ConvertPathV4(string path) + { +#if NO_FILE + return path.Replace('\\', '/'); +#else + if (Path.DirectorySeparatorChar == '/') + { + return path.Replace('\\', '/'); + } + else if (Path.DirectorySeparatorChar == '\\') + { + return path.Replace('/', '\\'); + } + return path; +#endif + } + + public override string ToString() + { + return FileName; + } + + private ushort Flags { get; set; } + + private bool HasFlag(ushort flag) + { + return (Flags & flag) == flag; + } + + internal uint FileCrc + { + get { + if (IsRar5 && !HasFlag(FileFlagsV5.HAS_CRC32)) { +//!!! rar5: + throw new InvalidOperationException("TODO rar5"); + } + return _fileCrc; + } + private set => _fileCrc = value; + } + + // 0 - storing + // 1 - fastest compression + // 2 - fast compression + // 3 - normal compression + // 4 - good compression + // 5 - best compression + internal byte CompressionMethod { get; private set; } + internal bool IsStored => CompressionMethod == 0; + + // eg (see DoUnpack()) + //case 15: // rar 1.5 compression + //case 20: // rar 2.x compression + //case 26: // files larger than 2GB + //case 29: // rar 3.x compression + //case 50: // RAR 5.0 compression algorithm. 
+ internal byte CompressionAlgorithm { get; private set; } + + public bool IsSolid { get; private set; } + + // unused for UnpackV1 implementation (limitation) + internal size_t WindowSize { get; private set; } + + internal byte[] R4Salt { get; private set; } + + private byte HostOs { get; set; } + internal uint FileAttributes { get; private set; } + internal long CompressedSize { get; private set; } + internal long UncompressedSize { get; private set; } + internal string FileName { get; private set; } + internal byte[] SubData { get; private set; } + internal int RecoverySectors { get; private set; } + internal long DataStartPosition { get; set; } + public Stream PackedStream { get; set; } + + public bool IsSplitAfter => IsRar5 ? HasHeaderFlag(HeaderFlagsV5.SPLIT_AFTER) : HasFlag(FileFlagsV4.SPLIT_AFTER); + + public bool IsDirectory => HasFlag(IsRar5 ? FileFlagsV5.DIRECTORY : FileFlagsV4.DIRECTORY); + + private bool isEncryptedRar5 = false; + public bool IsEncrypted => IsRar5 ? isEncryptedRar5: HasFlag(FileFlagsV4.PASSWORD); + + internal DateTime? FileLastModifiedTime { get; private set; } + + internal DateTime? FileCreatedTime { get; private set; } + + internal DateTime? FileLastAccessedTime { get; private set; } + + internal DateTime? 
FileArchivedTime { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileNameDecoder.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileNameDecoder.cs new file mode 100644 index 0000000000..98da75dae6 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/FileNameDecoder.cs @@ -0,0 +1,78 @@ +using System.Text; + +namespace SharpCompress.Common.Rar.Headers +{ + /// + /// This is for the crazy Rar encoding that I don't understand + /// + internal static class FileNameDecoder + { + internal static int GetChar(byte[] name, int pos) + { + return name[pos] & 0xff; + } + + internal static string Decode(byte[] name, int encPos) + { + int decPos = 0; + int flags = 0; + int flagBits = 0; + + int low = 0; + int high = 0; + int highByte = GetChar(name, encPos++); + StringBuilder buf = new StringBuilder(); + while (encPos < name.Length) + { + if (flagBits == 0) + { + flags = GetChar(name, encPos++); + flagBits = 8; + } + switch (flags >> 6) + { + case 0: + buf.Append((char)(GetChar(name, encPos++))); + ++decPos; + break; + + case 1: + buf.Append((char)(GetChar(name, encPos++) + (highByte << 8))); + ++decPos; + break; + + case 2: + low = GetChar(name, encPos); + high = GetChar(name, encPos + 1); + buf.Append((char)((high << 8) + low)); + ++decPos; + encPos += 2; + break; + + case 3: + int length = GetChar(name, encPos++); + if ((length & 0x80) != 0) + { + int correction = GetChar(name, encPos++); + for (length = (length & 0x7f) + 2; length > 0 && decPos < name.Length; length--, decPos++) + { + low = (GetChar(name, decPos) + correction) & 0xff; + buf.Append((char)((highByte << 8) + low)); + } + } + else + { + for (length += 2; length > 0 && decPos < name.Length; length--, decPos++) + { + buf.Append((char)(GetChar(name, decPos))); + } + } + break; + } + flags = (flags << 2) & 0xff; + flagBits -= 2; + } + return buf.ToString(); + } + } +} \ No newline at end of file diff --git 
a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/Flags.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/Flags.cs new file mode 100644 index 0000000000..0c19079b2a --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/Flags.cs @@ -0,0 +1,149 @@ +namespace SharpCompress.Common.Rar.Headers +{ + internal enum HeaderType : byte + { + Null, + Mark, + Archive, + File, + Service, + Comment, + Av, + Protect, + Sign, + NewSub, + EndArchive, + Crypt + } + + internal static class HeaderCodeV + { + public const byte RAR4_MARK_HEADER = 0x72; + public const byte RAR4_ARCHIVE_HEADER = 0x73; + public const byte RAR4_FILE_HEADER = 0x74; + public const byte RAR4_COMMENT_HEADER = 0x75; + public const byte RAR4_AV_HEADER = 0x76; + public const byte RAR4_SUB_HEADER = 0x77; + public const byte RAR4_PROTECT_HEADER = 0x78; + public const byte RAR4_SIGN_HEADER = 0x79; + public const byte RAR4_NEW_SUB_HEADER = 0x7a; + public const byte RAR4_END_ARCHIVE_HEADER = 0x7b; + + public const byte RAR5_ARCHIVE_HEADER = 0x01; + public const byte RAR5_FILE_HEADER = 0x02; + public const byte RAR5_SERVICE_HEADER = 0x03; + public const byte RAR5_ARCHIVE_ENCRYPTION_HEADER = 0x04; + public const byte RAR5_END_ARCHIVE_HEADER = 0x05; + } + + internal static class HeaderFlagsV4 + { + public const ushort HAS_DATA = 0x8000; + } + + internal static class EncryptionFlagsV5 + { + // RAR 5.0 archive encryption header specific flags. + public const uint CHFL_CRYPT_PSWCHECK = 0x01; // Password check data is present. + + public const uint FHEXTRA_CRYPT_PSWCHECK = 0x01; // Password check data is present. 
+ public const uint FHEXTRA_CRYPT_HASHMAC = 0x02; + } + + internal static class HeaderFlagsV5 + { + public const ushort HAS_EXTRA = 0x0001; + public const ushort HAS_DATA = 0x0002; + public const ushort KEEP = 0x0004; // block must be kept during an update + public const ushort SPLIT_BEFORE = 0x0008; + public const ushort SPLIT_AFTER = 0x0010; + public const ushort CHILD = 0x0020; // ??? Block depends on preceding file block. + public const ushort PRESERVE_CHILD = 0x0040; // ???? Preserve a child block if host block is modified + } + + internal static class ArchiveFlagsV4 + { + public const ushort VOLUME = 0x0001; + public const ushort COMMENT = 0x0002; + public const ushort LOCK = 0x0004; + public const ushort SOLID = 0x0008; + public const ushort NEW_NUMBERING = 0x0010; + public const ushort AV = 0x0020; + public const ushort PROTECT = 0x0040; + public const ushort PASSWORD = 0x0080; + public const ushort FIRST_VOLUME = 0x0100; + public const ushort ENCRYPT_VER = 0x0200; + } + + internal static class ArchiveFlagsV5 + { + public const ushort VOLUME = 0x0001; + public const ushort HAS_VOLUME_NUMBER = 0x0002; + public const ushort SOLID = 0x0004; + public const ushort PROTECT = 0x0008; + public const ushort LOCK = 0x0010; + } + + internal static class HostOsV4 + { + public const byte MS_DOS = 0; + public const byte OS2 = 1; + public const byte WIN32 = 2; + public const byte UNIX = 3; + public const byte MAC_OS = 4; + public const byte BE_OS = 5; + } + + internal static class HostOsV5 + { + public const byte WINDOWS = 0; + public const byte UNIX = 1; + } + + internal static class FileFlagsV4 + { + public const ushort SPLIT_BEFORE = 0x0001; + public const ushort SPLIT_AFTER = 0x0002; + public const ushort PASSWORD = 0x0004; + public const ushort COMMENT = 0x0008; + public const ushort SOLID = 0x0010; + + public const ushort WINDOW_MASK = 0x00e0; + public const ushort WINDOW64 = 0x0000; + public const ushort WINDOW128 = 0x0020; + public const ushort WINDOW256 = 0x0040; 
+ public const ushort WINDOW512 = 0x0060; + public const ushort WINDOW1024 = 0x0080; + public const ushort WINDOW2048 = 0x00a0; + public const ushort WINDOW4096 = 0x00c0; + public const ushort DIRECTORY = 0x00e0; + + public const ushort LARGE = 0x0100; + public const ushort UNICODE = 0x0200; + public const ushort SALT = 0x0400; + public const ushort VERSION = 0x0800; + public const ushort EXT_TIME = 0x1000; + public const ushort EXT_FLAGS = 0x2000; + } + + internal static class FileFlagsV5 + { + public const ushort DIRECTORY = 0x0001; + public const ushort HAS_MOD_TIME = 0x0002; + public const ushort HAS_CRC32 = 0x0004; + public const ushort UNPACKED_SIZE_UNKNOWN = 0x0008; + } + + internal static class EndArchiveFlagsV4 + { + public const ushort NEXT_VOLUME = 0x0001; + public const ushort DATA_CRC = 0x0002; + public const ushort REV_SPACE = 0x0004; + public const ushort VOLUME_NUMBER = 0x0008; + } + + internal static class EndArchiveFlagsV5 + { + public const ushort HAS_NEXT_VOLUME = 0x0001; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/IRarHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/IRarHeader.cs new file mode 100644 index 0000000000..bbc03593cf --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/IRarHeader.cs @@ -0,0 +1,7 @@ +namespace SharpCompress.Common.Rar.Headers +{ + internal interface IRarHeader + { + HeaderType HeaderType { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/MarkHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/MarkHeader.cs new file mode 100644 index 0000000000..3a27cf69f3 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/MarkHeader.cs @@ -0,0 +1,96 @@ +using System; +using System.IO; + +namespace SharpCompress.Common.Rar.Headers +{ + internal class MarkHeader : IRarHeader + { + private const int MAX_SFX_SIZE = 0x80000 - 16; //archive.cpp 
line 136 + + internal bool OldNumberingFormat { get; private set; } + + public bool IsRar5 { get; } + + private MarkHeader(bool isRar5) + { + IsRar5 = isRar5; + } + + public HeaderType HeaderType => HeaderType.Mark; + + private static byte GetByte(Stream stream) + { + var b = stream.ReadByte(); + if (b != -1) + { + return (byte)b; + } + throw new EndOfStreamException(); + } + + public static MarkHeader Read(Stream stream, bool leaveStreamOpen, bool lookForHeader) + { + int maxScanIndex = lookForHeader ? MAX_SFX_SIZE : 0; + try + { + int start = -1; + var b = GetByte(stream); start++; + while (start <= maxScanIndex) + { + // Rar old signature: 52 45 7E 5E + // Rar4 signature: 52 61 72 21 1A 07 00 + // Rar5 signature: 52 61 72 21 1A 07 01 00 + if (b == 0x52) + { + b = GetByte(stream); start++; + if (b == 0x61) + { + b = GetByte(stream); start++; + if (b != 0x72) continue; + b = GetByte(stream); start++; + if (b != 0x21) continue; + b = GetByte(stream); start++; + if (b != 0x1a) continue; + b = GetByte(stream); start++; + if (b != 0x07) continue; + + b = GetByte(stream); start++; + if (b == 1) + { + b = GetByte(stream); start++; + if (b != 0) continue; + return new MarkHeader(true); // Rar5 + } + else if (b == 0) + { + return new MarkHeader(false); // Rar4 + } + } + else if (b == 0x45) + { + b = GetByte(stream); start++; + if (b != 0x7e) continue; + b = GetByte(stream); start++; + if (b != 0x5e) continue; + throw new InvalidFormatException("Rar format version pre-4 is unsupported."); + } + } + else + { + b = GetByte(stream); start++; + } + } + } + catch (Exception e) + { + if (!leaveStreamOpen) + { + stream.Dispose(); + } + throw new InvalidFormatException("Error trying to read rar signature.", e); + } + + throw new InvalidFormatException("Rar signature not found"); + } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/NewSubHeaderType.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/NewSubHeaderType.cs new file mode 100644 index 
0000000000..84d50aee2a --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/NewSubHeaderType.cs @@ -0,0 +1,55 @@ +using System; + +namespace SharpCompress.Common.Rar.Headers +{ + internal class NewSubHeaderType : IEquatable + { + internal static readonly NewSubHeaderType SUBHEAD_TYPE_CMT = new NewSubHeaderType('C', 'M', 'T'); + + //internal static final NewSubHeaderType SUBHEAD_TYPE_ACL = new NewSubHeaderType(new byte[]{'A','C','L'}); + + //internal static final NewSubHeaderType SUBHEAD_TYPE_STREAM = new NewSubHeaderType(new byte[]{'S','T','M'}); + + //internal static final NewSubHeaderType SUBHEAD_TYPE_UOWNER = new NewSubHeaderType(new byte[]{'U','O','W'}); + + //internal static final NewSubHeaderType SUBHEAD_TYPE_AV = new NewSubHeaderType(new byte[]{'A','V'}); + + internal static readonly NewSubHeaderType SUBHEAD_TYPE_RR = new NewSubHeaderType('R', 'R'); + + //internal static final NewSubHeaderType SUBHEAD_TYPE_OS2EA = new NewSubHeaderType(new byte[]{'E','A','2'}); + + //internal static final NewSubHeaderType SUBHEAD_TYPE_BEOSEA = new NewSubHeaderType(new byte[]{'E','A','B','E'}); + + private readonly byte[] _bytes; + + private NewSubHeaderType(params char[] chars) + { + _bytes = new byte[chars.Length]; + for (int i = 0; i < chars.Length; ++i) + { + _bytes[i] = (byte)chars[i]; + } + } + + internal bool Equals(byte[] bytes) + { + if (_bytes.Length != bytes.Length) + { + return false; + } + for (int i = 0; i < bytes.Length; ++i) + { + if (_bytes[i] != bytes[i]) + { + return false; + } + } + return true; + } + + public bool Equals(NewSubHeaderType other) + { + return Equals(other._bytes); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/ProtectHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/ProtectHeader.cs new file mode 100644 index 0000000000..f7f0e8ba9e --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/ProtectHeader.cs @@ -0,0 +1,28 @@ 
+using SharpCompress.IO; + +namespace SharpCompress.Common.Rar.Headers +{ + // ProtectHeader is part of the Recovery Record feature + internal class ProtectHeader : RarHeader + { + public ProtectHeader(RarHeader header, RarCrcBinaryReader reader) + : base(header, reader, HeaderType.Protect) + { + if (IsRar5) throw new InvalidFormatException("unexpected rar5 record"); + } + + protected override void ReadFinish(MarkingBinaryReader reader) + { + Version = reader.ReadByte(); + RecSectors = reader.ReadUInt16(); + TotalBlocks = reader.ReadUInt32(); + Mark = reader.ReadBytes(8); + } + + internal uint DataSize => checked((uint)AdditionalDataSize); + internal byte Version { get; private set; } + internal ushort RecSectors { get; private set; } + internal uint TotalBlocks { get; private set; } + internal byte[] Mark { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeader.cs new file mode 100644 index 0000000000..b15f650078 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeader.cs @@ -0,0 +1,130 @@ +using System; +using System.IO; +using SharpCompress.IO; + +namespace SharpCompress.Common.Rar.Headers +{ + // http://www.forensicswiki.org/w/images/5/5b/RARFileStructure.txt + // https://www.rarlab.com/technote.htm + internal class RarHeader : IRarHeader + { + private readonly HeaderType _headerType; + private readonly bool _isRar5; + + internal static RarHeader TryReadBase(RarCrcBinaryReader reader, bool isRar5, ArchiveEncoding archiveEncoding) + { + try + { + return new RarHeader(reader, isRar5, archiveEncoding); + } + catch (EndOfStreamException) + { + return null; + } + } + + private RarHeader(RarCrcBinaryReader reader, bool isRar5, ArchiveEncoding archiveEncoding) + { + _headerType = HeaderType.Null; + _isRar5 = isRar5; + ArchiveEncoding = archiveEncoding; + if (IsRar5) + { + HeaderCrc = 
reader.ReadUInt32(); + reader.ResetCrc(); + HeaderSize = (int)reader.ReadRarVIntUInt32(3); + reader.Mark(); + HeaderCode = reader.ReadRarVIntByte(); + HeaderFlags = reader.ReadRarVIntUInt16(2); + + if (HasHeaderFlag(HeaderFlagsV5.HAS_EXTRA)) + { + ExtraSize = reader.ReadRarVIntUInt32(); + } + if (HasHeaderFlag(HeaderFlagsV5.HAS_DATA)) + { + AdditionalDataSize = (long)reader.ReadRarVInt(); + } + } else { + reader.Mark(); + HeaderCrc = reader.ReadUInt16(); + reader.ResetCrc(); + HeaderCode = reader.ReadByte(); + HeaderFlags = reader.ReadUInt16(); + HeaderSize = reader.ReadInt16(); + if (HasHeaderFlag(HeaderFlagsV4.HAS_DATA)) + { + AdditionalDataSize = reader.ReadUInt32(); + } + } + } + + protected RarHeader(RarHeader header, RarCrcBinaryReader reader, HeaderType headerType) { + _headerType = headerType; + _isRar5 = header.IsRar5; + HeaderCrc = header.HeaderCrc; + HeaderCode = header.HeaderCode; + HeaderFlags = header.HeaderFlags; + HeaderSize = header.HeaderSize; + ExtraSize = header.ExtraSize; + AdditionalDataSize = header.AdditionalDataSize; + ArchiveEncoding = header.ArchiveEncoding; + ReadFinish(reader); + + int n = RemainingHeaderBytes(reader); + if (n > 0) + { + reader.ReadBytes(n); + } + + VerifyHeaderCrc(reader.GetCrc32()); + } + + protected int RemainingHeaderBytes(MarkingBinaryReader reader) { + return checked(HeaderSize - (int)reader.CurrentReadByteCount); + } + + protected virtual void ReadFinish(MarkingBinaryReader reader) + { + throw new NotImplementedException(); + } + + private void VerifyHeaderCrc(uint crc32) + { + var b = (IsRar5 ? 
crc32 : (ushort)crc32) == HeaderCrc; + if (!b) + { + throw new InvalidFormatException("rar header crc mismatch"); + } + } + + public HeaderType HeaderType => _headerType; + + protected bool IsRar5 => _isRar5; + + protected uint HeaderCrc { get; } + + internal byte HeaderCode { get; } + + protected ushort HeaderFlags { get; } + + protected bool HasHeaderFlag(ushort flag) + { + return (HeaderFlags & flag) == flag; + } + + protected int HeaderSize { get; } + + internal ArchiveEncoding ArchiveEncoding { get; } + + /// + /// Extra header size. + /// + protected uint ExtraSize { get; } + + /// + /// Size of additional data (eg file contents) + /// + protected long AdditionalDataSize { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs new file mode 100644 index 0000000000..8c4f34f7ac --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs @@ -0,0 +1,192 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.IO; +using SharpCompress.Readers; + +namespace SharpCompress.Common.Rar.Headers +{ + internal class RarHeaderFactory + { + private bool _isRar5; + + internal RarHeaderFactory(StreamingMode mode, ReaderOptions options) + { + StreamingMode = mode; + Options = options; + } + + private ReaderOptions Options { get; } + internal StreamingMode StreamingMode { get; } + internal bool IsEncrypted { get; private set; } + + internal IEnumerable ReadHeaders(Stream stream) + { + var markHeader = MarkHeader.Read(stream, Options.LeaveStreamOpen, Options.LookForHeader); + _isRar5 = markHeader.IsRar5; + yield return markHeader; + + RarHeader header; + while ((header = TryReadNextHeader(stream)) != null) + { + yield return header; + if (header.HeaderType == HeaderType.EndArchive) + { + // End of archive marker. 
RAR does not read anything after this header letting to use third + // party tools to add extra information such as a digital signature to archive. + yield break; + } + } + } + + private RarHeader TryReadNextHeader(Stream stream) + { + RarCrcBinaryReader reader; + if (!IsEncrypted) + { + reader = new RarCrcBinaryReader(stream); + } + else + { +#if !NO_CRYPTO + if (Options.Password == null) + { + throw new CryptographicException("Encrypted Rar archive has no password specified."); + } + reader = new RarCryptoBinaryReader(stream, Options.Password); +#else + throw new CryptographicException("Rar encryption unsupported on this platform"); +#endif + } + + var header = RarHeader.TryReadBase(reader, _isRar5, Options.ArchiveEncoding); + if (header == null) + { + return null; + } + switch (header.HeaderCode) + { + case HeaderCodeV.RAR5_ARCHIVE_HEADER: + case HeaderCodeV.RAR4_ARCHIVE_HEADER: + { + var ah = new ArchiveHeader(header, reader); + if (ah.IsEncrypted == true) + { + //!!! rar5 we don't know yet + IsEncrypted = true; + } + return ah; + } + + case HeaderCodeV.RAR4_PROTECT_HEADER: + { + var ph = new ProtectHeader(header, reader); + // skip the recovery record data, we do not use it. 
+ switch (StreamingMode) + { + case StreamingMode.Seekable: + { + reader.BaseStream.Position += ph.DataSize; + } + break; + case StreamingMode.Streaming: + { + reader.BaseStream.Skip(ph.DataSize); + } + break; + default: + { + throw new InvalidFormatException("Invalid StreamingMode"); + } + } + + return ph; + } + + case HeaderCodeV.RAR5_SERVICE_HEADER: + { + var fh = new FileHeader(header, reader, HeaderType.Service); + SkipData(fh, reader); + return fh; + } + + case HeaderCodeV.RAR4_NEW_SUB_HEADER: + { + var fh = new FileHeader(header, reader, HeaderType.NewSub); + SkipData(fh, reader); + return fh; + } + + case HeaderCodeV.RAR5_FILE_HEADER: + case HeaderCodeV.RAR4_FILE_HEADER: + { + var fh = new FileHeader(header, reader, HeaderType.File); + switch (StreamingMode) + { + case StreamingMode.Seekable: + { + fh.DataStartPosition = reader.BaseStream.Position; + reader.BaseStream.Position += fh.CompressedSize; + } + break; + case StreamingMode.Streaming: + { + var ms = new ReadOnlySubStream(reader.BaseStream, fh.CompressedSize); + if (fh.R4Salt == null) + { + fh.PackedStream = ms; + } + else + { +#if !NO_CRYPTO + fh.PackedStream = new RarCryptoWrapper(ms, Options.Password, fh.R4Salt); +#else + throw new NotSupportedException("RarCrypto not supported"); +#endif + } + } + break; + default: + { + throw new InvalidFormatException("Invalid StreamingMode"); + } + } + return fh; + } + case HeaderCodeV.RAR5_END_ARCHIVE_HEADER: + case HeaderCodeV.RAR4_END_ARCHIVE_HEADER: + { + return new EndArchiveHeader(header, reader); + } + case HeaderCodeV.RAR5_ARCHIVE_ENCRYPTION_HEADER: + { + var ch = new ArchiveCryptHeader(header, reader); + IsEncrypted = true; + return ch; + } + default: + { + throw new InvalidFormatException("Unknown Rar Header: " + header.HeaderCode); + } + } + } + + private void SkipData(FileHeader fh, RarCrcBinaryReader reader) { + switch (StreamingMode) { + case StreamingMode.Seekable: { + fh.DataStartPosition = reader.BaseStream.Position; + 
reader.BaseStream.Position += fh.CompressedSize; + } + break; + case StreamingMode.Streaming: { + //skip the data because it's useless? + reader.BaseStream.Skip(fh.CompressedSize); + } + break; + default: { + throw new InvalidFormatException("Invalid StreamingMode"); + } + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/SignHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/SignHeader.cs new file mode 100644 index 0000000000..58b3baefbd --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/Headers/SignHeader.cs @@ -0,0 +1,26 @@ +using SharpCompress.IO; + +namespace SharpCompress.Common.Rar.Headers +{ + internal class SignHeader : RarHeader + { + protected SignHeader(RarHeader header, RarCrcBinaryReader reader) + : base(header, reader, HeaderType.Sign) + { + if (IsRar5) throw new InvalidFormatException("unexpected rar5 record"); + } + + protected override void ReadFinish(MarkingBinaryReader reader) + { + CreationTime = reader.ReadInt32(); + ArcNameSize = reader.ReadInt16(); + UserNameSize = reader.ReadInt16(); + } + + internal int CreationTime { get; private set; } + + internal short ArcNameSize { get; private set; } + + internal short UserNameSize { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCrcBinaryReader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCrcBinaryReader.cs new file mode 100644 index 0000000000..ddfd8b9848 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCrcBinaryReader.cs @@ -0,0 +1,50 @@ +using System.IO; +using SharpCompress.Compressors.Rar; +using SharpCompress.IO; + +namespace SharpCompress.Common.Rar +{ + internal class RarCrcBinaryReader : MarkingBinaryReader + { + private uint _currentCrc; + + public RarCrcBinaryReader(Stream stream) + : base(stream) + { + } + + public uint GetCrc32() + { + return ~_currentCrc; + } + + public void ResetCrc() + { + 
_currentCrc = 0xffffffff; + } + + protected void UpdateCrc(byte b) + { + _currentCrc = RarCRC.CheckCrc(_currentCrc, b); + } + + protected byte[] ReadBytesNoCrc(int count) + { + return base.ReadBytes(count); + } + + public override byte ReadByte() + { + var b = base.ReadByte(); + _currentCrc = RarCRC.CheckCrc(_currentCrc, b); + return b; + } + + public override byte[] ReadBytes(int count) + { + var result = base.ReadBytes(count); + _currentCrc = RarCRC.CheckCrc(_currentCrc, result, 0, result.Length); + return result; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoBinaryReader.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoBinaryReader.cs new file mode 100644 index 0000000000..66c0d60688 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoBinaryReader.cs @@ -0,0 +1,115 @@ +#if !NO_CRYPTO +using System.Collections.Generic; +using System.IO; + +namespace SharpCompress.Common.Rar +{ + internal class RarCryptoBinaryReader : RarCrcBinaryReader + { + private RarRijndael _rijndael; + private byte[] _salt; + private readonly string _password; + private readonly Queue _data = new Queue(); + private long _readCount; + + public RarCryptoBinaryReader(Stream stream, string password) + : base(stream) + { + _password = password; + + // coderb: not sure why this was being done at this logical point + //SkipQueue(); + byte[] salt = ReadBytes(8); + InitializeAes(salt); + } + + // track read count ourselves rather than using the underlying stream since we buffer + public override long CurrentReadByteCount + { + get => _readCount; + protected set + { + // ignore + } + } + + public override void Mark() + { + _readCount = 0; + } + + private bool UseEncryption => _salt != null; + + internal void InitializeAes(byte[] salt) + { + _salt = salt; + _rijndael = RarRijndael.InitializeFrom(_password, salt); + } + + public override byte ReadByte() + { + if (UseEncryption) + { + return 
ReadAndDecryptBytes(1)[0];
+            }
+
+            _readCount++;
+            return base.ReadByte();
+        }
+
+        public override byte[] ReadBytes(int count)
+        {
+            if (UseEncryption)
+            {
+                return ReadAndDecryptBytes(count);
+            }
+
+            _readCount += count;
+            return base.ReadBytes(count);
+        }
+
+        private byte[] ReadAndDecryptBytes(int count)
+        {
+            int queueSize = _data.Count;
+            int sizeToRead = count - queueSize;
+
+            if (sizeToRead > 0)
+            {
+                int alignedSize = sizeToRead + ((~sizeToRead + 1) & 0xf);
+                for (int i = 0; i < alignedSize / 16; i++)
+                {
+                    //long ax = System.currentTimeMillis();
+                    byte[] cipherText = ReadBytesNoCrc(16);
+                    var readBytes = _rijndael.ProcessBlock(cipherText);
+                    foreach (var readByte in readBytes)
+                        _data.Enqueue(readByte);
+                }
+            }
+
+            var decryptedBytes = new byte[count];
+
+            for (int i = 0; i < count; i++)
+            {
+                var b = _data.Dequeue();
+                decryptedBytes[i] = b;
+                UpdateCrc(b);
+            }
+
+            _readCount += count;
+            return decryptedBytes;
+        }
+
+        public void ClearQueue()
+        {
+            _data.Clear();
+        }
+
+        public void SkipQueue()
+        {
+            var position = BaseStream.Position;
+            BaseStream.Position = position + _data.Count;
+            ClearQueue();
+        }
+    }
+}
+#endif
\ No newline at end of file
diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoWrapper.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoWrapper.cs
new file mode 100644
index 0000000000..50e1520ce8
--- /dev/null
+++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarCryptoWrapper.cs
@@ -0,0 +1,99 @@
+
+#if !NO_CRYPTO
+using System;
+using System.Collections.Generic;
+using System.IO;
+
+namespace SharpCompress.Common.Rar
+{
+    internal class RarCryptoWrapper : Stream
+    {
+        private readonly Stream _actualStream;
+        private readonly byte[] _salt;
+        private RarRijndael _rijndael;
+        private readonly Queue<byte> _data = new Queue<byte>();
+
+        public RarCryptoWrapper(Stream actualStream, string password, byte[] salt)
+        {
+            _actualStream = actualStream;
+            _salt = salt;
+            _rijndael = RarRijndael.InitializeFrom(password, salt);
+        }
+
+        
public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override int Read(byte[] buffer, int offset, int count) + { + if (_salt == null) + { + return _actualStream.Read(buffer, offset, count); + } + return ReadAndDecrypt(buffer, offset, count); + } + + public int ReadAndDecrypt(byte[] buffer, int offset, int count) + { + int queueSize = _data.Count; + int sizeToRead = count - queueSize; + + if (sizeToRead > 0) + { + int alignedSize = sizeToRead + ((~sizeToRead + 1) & 0xf); + for (int i = 0; i < alignedSize / 16; i++) + { + //long ax = System.currentTimeMillis(); + byte[] cipherText = new byte[RarRijndael.CRYPTO_BLOCK_SIZE]; + _actualStream.Read(cipherText, 0, RarRijndael.CRYPTO_BLOCK_SIZE); + + var readBytes = _rijndael.ProcessBlock(cipherText); + foreach (var readByte in readBytes) + _data.Enqueue(readByte); + + } + + for (int i = 0; i < count; i++) + buffer[offset + i] = _data.Dequeue(); + } + return count; + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override long Length => throw new NotSupportedException(); + + public override long Position { get; set; } + + protected override void Dispose(bool disposing) + { + if (_rijndael != null) + { + _rijndael.Dispose(); + _rijndael = null; + } + base.Dispose(disposing); + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/RarEntry.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarEntry.cs new file mode 100644 index 0000000000..c461f42a44 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarEntry.cs @@ -0,0 +1,65 
@@ +using System; +using SharpCompress.Common.Rar.Headers; + +namespace SharpCompress.Common.Rar +{ + public abstract class RarEntry : Entry + { + internal abstract FileHeader FileHeader { get; } + + /// + /// As the V2017 port isn't complete, add this check to use the legacy Rar code. + /// + internal bool IsRarV3 => FileHeader.CompressionAlgorithm == 29 || FileHeader.CompressionAlgorithm == 36; + + /// + /// The File's 32 bit CRC Hash + /// + public override long Crc => FileHeader.FileCrc; + + /// + /// The path of the file internal to the Rar Archive. + /// + public override string Key => FileHeader.FileName; + + public override string LinkTarget => null; + + /// + /// The entry last modified time in the archive, if recorded + /// + public override DateTime? LastModifiedTime => FileHeader.FileLastModifiedTime; + + /// + /// The entry create time in the archive, if recorded + /// + public override DateTime? CreatedTime => FileHeader.FileCreatedTime; + + /// + /// The entry last accessed time in the archive, if recorded + /// + public override DateTime? LastAccessedTime => FileHeader.FileLastAccessedTime; + + /// + /// The entry time whend archived, if recorded + /// + public override DateTime? ArchivedTime => FileHeader.FileArchivedTime; + + /// + /// Entry is password protected and encrypted and cannot be extracted. + /// + public override bool IsEncrypted => FileHeader.IsEncrypted; + + /// + /// Entry is password protected and encrypted and cannot be extracted. 
+ /// + public override bool IsDirectory => FileHeader.IsDirectory; + + public override bool IsSplitAfter => FileHeader.IsSplitAfter; + + public override string ToString() + { + return string.Format("Entry Path: {0} Compressed Size: {1} Uncompressed Size: {2} CRC: {3}", + Key, CompressedSize, Size, Crc); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/RarFilePart.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarFilePart.cs new file mode 100644 index 0000000000..5c05cc8091 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarFilePart.cs @@ -0,0 +1,27 @@ +using System.IO; +using SharpCompress.Common.Rar.Headers; + +namespace SharpCompress.Common.Rar +{ + /// + /// This represents a single file part that exists in a rar volume. A compressed file is one or many file parts that are spread across one or may rar parts. + /// + internal abstract class RarFilePart : FilePart + { + internal RarFilePart(MarkHeader mh, FileHeader fh) + : base(fh.ArchiveEncoding) + { + MarkHeader = mh; + FileHeader = fh; + } + + internal MarkHeader MarkHeader { get; } + + internal FileHeader FileHeader { get; } + + internal override Stream GetRawStream() + { + return null; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/RarRijndael.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarRijndael.cs new file mode 100644 index 0000000000..824cb64e73 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarRijndael.cs @@ -0,0 +1,121 @@ +#if !NO_CRYPTO +using System; +using System.Collections.Generic; +using System.Linq; +using System.Security.Cryptography; +using System.Text; +using SharpCompress.Crypto; + +namespace SharpCompress.Common.Rar +{ + internal class RarRijndael : IDisposable + { + internal const int CRYPTO_BLOCK_SIZE = 16; + + private readonly string _password; + private readonly byte[] _salt; + private byte[] _aesInitializationVector; + private 
RijndaelEngine _rijndael;
+
+        private RarRijndael(string password, byte[] salt)
+        {
+            _password = password;
+            _salt = salt;
+        }
+
+        private byte[] ComputeHash(byte[] input)
+        {
+            var sha = SHA1.Create();
+            return sha.ComputeHash(input);
+        }
+
+        private void Initialize()
+        {
+
+            _rijndael = new RijndaelEngine();
+            _aesInitializationVector = new byte[CRYPTO_BLOCK_SIZE];
+            int rawLength = 2*_password.Length;
+            byte[] rawPassword = new byte[rawLength + 8];
+            byte[] passwordBytes = Encoding.UTF8.GetBytes(_password);
+            for (int i = 0; i < _password.Length; i++)
+            {
+                rawPassword[i*2] = passwordBytes[i];
+                rawPassword[i*2 + 1] = 0;
+            }
+            for (int i = 0; i < _salt.Length; i++)
+            {
+                rawPassword[i + rawLength] = _salt[i];
+            }
+
+
+            const int noOfRounds = (1 << 18);
+            IList<byte> bytes = new List<byte>();
+            byte[] digest;
+
+            //TODO slow code below, find ways to optimize
+            for (int i = 0; i < noOfRounds; i++)
+            {
+                bytes.AddRange(rawPassword);
+
+                bytes.AddRange(new[]
+                {
+                    (byte) i, (byte) (i >> 8), (byte) (i >> CRYPTO_BLOCK_SIZE)
+                });
+                if (i%(noOfRounds/CRYPTO_BLOCK_SIZE) == 0)
+                {
+                    digest = ComputeHash(bytes.ToArray());
+                    _aesInitializationVector[i/(noOfRounds/CRYPTO_BLOCK_SIZE)] = digest[19];
+                }
+            }
+
+            digest = ComputeHash(bytes.ToArray());
+            //slow code ends
+
+            byte[] aesKey = new byte[CRYPTO_BLOCK_SIZE];
+            for (int i = 0; i < 4; i++)
+            {
+                for (int j = 0; j < 4; j++)
+                {
+                    aesKey[i*4 + j] = (byte)
+                        (((digest[i*4]*0x1000000) & 0xff000000 |
+                          (uint) ((digest[i*4 + 1]*0x10000) & 0xff0000) |
+                          (uint) ((digest[i*4 + 2]*0x100) & 0xff00) |
+                          (uint) (digest[i*4 + 3] & 0xff)) >> (j*8));
+                }
+            }
+
+            _rijndael.Init(false, new KeyParameter(aesKey));
+
+        }
+
+        public static RarRijndael InitializeFrom(string password, byte[] salt)
+        {
+            var rijndael = new RarRijndael(password, salt);
+            rijndael.Initialize();
+            return rijndael;
+        }
+
+        public byte[] ProcessBlock(byte[] cipherText)
+        {
+            var plainText = new byte[CRYPTO_BLOCK_SIZE];
+            var decryptedBytes = new List<byte>();
+            
_rijndael.ProcessBlock(cipherText, 0, plainText, 0); + + for (int j = 0; j < plainText.Length; j++) + { + decryptedBytes.Add((byte) (plainText[j] ^ _aesInitializationVector[j%16])); //32:114, 33:101 + } + + for (int j = 0; j < _aesInitializationVector.Length; j++) + { + _aesInitializationVector[j] = cipherText[j]; + } + return decryptedBytes.ToArray(); + } + + public void Dispose() + { + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Rar/RarVolume.cs b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarVolume.cs new file mode 100644 index 0000000000..265db11c63 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Rar/RarVolume.cs @@ -0,0 +1,112 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common.Rar.Headers; +using SharpCompress.IO; +using SharpCompress.Readers; + +namespace SharpCompress.Common.Rar +{ + /// + /// A RarArchiveVolume is a single rar file that may or may not be a split RarArchive. 
A Rar Archive is one to many Rar Parts + /// + public abstract class RarVolume : Volume + { + private readonly RarHeaderFactory _headerFactory; + + internal RarVolume(StreamingMode mode, Stream stream, ReaderOptions options) + : base(stream, options) + { + _headerFactory = new RarHeaderFactory(mode, options); + } + + internal ArchiveHeader ArchiveHeader { get; private set; } + + internal StreamingMode Mode => _headerFactory.StreamingMode; + + internal abstract IEnumerable ReadFileParts(); + + internal abstract RarFilePart CreateFilePart(MarkHeader markHeader, FileHeader fileHeader); + + internal IEnumerable GetVolumeFileParts() + { + MarkHeader lastMarkHeader = null; + foreach (var header in _headerFactory.ReadHeaders(Stream)) + { + switch (header.HeaderType) + { + case HeaderType.Mark: + { + lastMarkHeader = header as MarkHeader; + } + break; + case HeaderType.Archive: + { + ArchiveHeader = header as ArchiveHeader; + } + break; + case HeaderType.File: + { + var fh = header as FileHeader; + yield return CreateFilePart(lastMarkHeader, fh); + } + break; + } + } + } + + private void EnsureArchiveHeaderLoaded() + { + if (ArchiveHeader == null) + { + if (Mode == StreamingMode.Streaming) + { + throw new InvalidOperationException("ArchiveHeader should never been null in a streaming read."); + } + + // we only want to load the archive header to avoid overhead but have to do the nasty thing and reset the stream + GetVolumeFileParts().First(); + Stream.Position = 0; + } + } + + /// + /// RarArchive is the first volume of a multi-part archive. + /// Only Rar 3.0 format and higher + /// + public override bool IsFirstVolume + { + get + { + EnsureArchiveHeaderLoaded(); + return ArchiveHeader.IsFirstVolume; + } + } + + /// + /// RarArchive is part of a multi-part archive. 
+ /// + public override bool IsMultiVolume + { + get + { + EnsureArchiveHeaderLoaded(); + return ArchiveHeader.IsVolume; + } + } + + /// + /// RarArchive is SOLID (this means the Archive saved bytes by reusing information which helps for archives containing many small files). + /// Currently, SharpCompress cannot decompress SOLID archives. + /// + public bool IsSolidArchive + { + get + { + EnsureArchiveHeaderLoaded(); + return ArchiveHeader.IsSolid; + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/ReaderExtractionEventArgs.cs b/BizHawk.Client.Common/SharpCompress/Common/ReaderExtractionEventArgs.cs new file mode 100644 index 0000000000..aadc563c7e --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/ReaderExtractionEventArgs.cs @@ -0,0 +1,17 @@ +using System; +using SharpCompress.Readers; + +namespace SharpCompress.Common +{ + public class ReaderExtractionEventArgs : EventArgs + { + internal ReaderExtractionEventArgs(T entry, ReaderProgress readerProgress = null) + { + Item = entry; + ReaderProgress = readerProgress; + } + + public T Item { get; } + public ReaderProgress ReaderProgress { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveDatabase.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveDatabase.cs new file mode 100644 index 0000000000..e827eb20ea --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveDatabase.cs @@ -0,0 +1,182 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Compressors.LZMA; +using SharpCompress.Compressors.LZMA.Utilites; + +namespace SharpCompress.Common.SevenZip +{ + internal class ArchiveDatabase + { + internal byte _majorVersion; + internal byte _minorVersion; + internal long _startPositionAfterHeader; + internal long _dataStartPosition; + + internal List _packSizes = new List(); + internal List _packCrCs = new List(); + internal 
List _folders = new List(); + internal List _numUnpackStreamsVector; + internal List _files = new List(); + + internal List _packStreamStartPositions = new List(); + internal List _folderStartFileIndex = new List(); + internal List _fileIndexToFolderIndexMap = new List(); + + internal IPasswordProvider PasswordProvider { get; } + + public ArchiveDatabase(IPasswordProvider passwordProvider) + { + PasswordProvider = passwordProvider; + } + + internal void Clear() + { + _packSizes.Clear(); + _packCrCs.Clear(); + _folders.Clear(); + _numUnpackStreamsVector = null; + _files.Clear(); + + _packStreamStartPositions.Clear(); + _folderStartFileIndex.Clear(); + _fileIndexToFolderIndexMap.Clear(); + } + + internal bool IsEmpty() + { + return _packSizes.Count == 0 + && _packCrCs.Count == 0 + && _folders.Count == 0 + && _numUnpackStreamsVector.Count == 0 + && _files.Count == 0; + } + + private void FillStartPos() + { + _packStreamStartPositions.Clear(); + + long startPos = 0; + for (int i = 0; i < _packSizes.Count; i++) + { + _packStreamStartPositions.Add(startPos); + startPos += _packSizes[i]; + } + } + + private void FillFolderStartFileIndex() + { + _folderStartFileIndex.Clear(); + _fileIndexToFolderIndexMap.Clear(); + + int folderIndex = 0; + int indexInFolder = 0; + for (int i = 0; i < _files.Count; i++) + { + CFileItem file = _files[i]; + + bool emptyStream = !file.HasStream; + + if (emptyStream && indexInFolder == 0) + { + _fileIndexToFolderIndexMap.Add(-1); + continue; + } + + if (indexInFolder == 0) + { + // v3.13 incorrectly worked with empty folders + // v4.07: Loop for skipping empty folders + for (;;) + { + if (folderIndex >= _folders.Count) + { + throw new InvalidOperationException(); + } + + _folderStartFileIndex.Add(i); // check it + + if (_numUnpackStreamsVector[folderIndex] != 0) + { + break; + } + + folderIndex++; + } + } + + _fileIndexToFolderIndexMap.Add(folderIndex); + + if (emptyStream) + { + continue; + } + + indexInFolder++; + + if (indexInFolder >= 
_numUnpackStreamsVector[folderIndex]) + { + folderIndex++; + indexInFolder = 0; + } + } + } + + public void Fill() + { + FillStartPos(); + FillFolderStartFileIndex(); + } + + internal long GetFolderStreamPos(CFolder folder, int indexInFolder) + { + int index = folder._firstPackStreamId + indexInFolder; + return _dataStartPosition + _packStreamStartPositions[index]; + } + + internal long GetFolderFullPackSize(int folderIndex) + { + int packStreamIndex = _folders[folderIndex]._firstPackStreamId; + CFolder folder = _folders[folderIndex]; + + long size = 0; + for (int i = 0; i < folder._packStreams.Count; i++) + { + size += _packSizes[packStreamIndex + i]; + } + + return size; + } + + internal Stream GetFolderStream(Stream stream, CFolder folder, IPasswordProvider pw) + { + int packStreamIndex = folder._firstPackStreamId; + long folderStartPackPos = GetFolderStreamPos(folder, 0); + List packSizes = new List(); + for (int j = 0; j < folder._packStreams.Count; j++) + { + packSizes.Add(_packSizes[packStreamIndex + j]); + } + + return DecoderStreamHelper.CreateDecoderStream(stream, folderStartPackPos, packSizes.ToArray(), folder, pw); + } + + private long GetFolderPackStreamSize(int folderIndex, int streamIndex) + { + return _packSizes[_folders[folderIndex]._firstPackStreamId + streamIndex]; + } + + private long GetFilePackSize(int fileIndex) + { + int folderIndex = _fileIndexToFolderIndexMap[fileIndex]; + if (folderIndex != -1) + { + if (_folderStartFileIndex[folderIndex] == fileIndex) + { + return GetFolderFullPackSize(folderIndex); + } + } + return 0; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveReader.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveReader.cs new file mode 100644 index 0000000000..2093a83106 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/ArchiveReader.cs @@ -0,0 +1,1591 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; 
+using System.IO; +using System.Linq; +using SharpCompress.Compressors.LZMA; +using SharpCompress.Compressors.LZMA.Utilites; +using SharpCompress.IO; + +namespace SharpCompress.Common.SevenZip +{ + internal class ArchiveReader + { + internal Stream _stream; + internal Stack _readerStack = new Stack(); + internal DataReader _currentReader; + internal long _streamOrigin; + internal long _streamEnding; + internal byte[] _header; + + private readonly Dictionary _cachedStreams = new Dictionary(); + + internal void AddByteStream(byte[] buffer, int offset, int length) + { + _readerStack.Push(_currentReader); + _currentReader = new DataReader(buffer, offset, length); + } + + internal void DeleteByteStream() + { + _currentReader = _readerStack.Pop(); + } + + #region Private Methods - Data Reader + + internal Byte ReadByte() + { + return _currentReader.ReadByte(); + } + + private void ReadBytes(byte[] buffer, int offset, int length) + { + _currentReader.ReadBytes(buffer, offset, length); + } + + private ulong ReadNumber() + { + return _currentReader.ReadNumber(); + } + + internal int ReadNum() + { + return _currentReader.ReadNum(); + } + + private uint ReadUInt32() + { + return _currentReader.ReadUInt32(); + } + + private ulong ReadUInt64() + { + return _currentReader.ReadUInt64(); + } + + private BlockType? ReadId() + { + ulong id = _currentReader.ReadNumber(); + if (id > 25) + { + return null; + } +#if DEBUG + Log.WriteLine("ReadId: {0}", (BlockType)id); +#endif + return (BlockType)id; + } + + private void SkipData(long size) + { + _currentReader.SkipData(size); + } + + private void SkipData() + { + _currentReader.SkipData(); + } + + private void WaitAttribute(BlockType attribute) + { + for (;;) + { + BlockType? 
type = ReadId(); + if (type == attribute) + { + return; + } + if (type == BlockType.End) + { + throw new InvalidOperationException(); + } + SkipData(); + } + } + + private void ReadArchiveProperties() + { + while (ReadId() != BlockType.End) + { + SkipData(); + } + } + + #endregion + + #region Private Methods - Reader Utilities + + private BitVector ReadBitVector(int length) + { + var bits = new BitVector(length); + + byte data = 0; + byte mask = 0; + + for (int i = 0; i < length; i++) + { + if (mask == 0) + { + data = ReadByte(); + mask = 0x80; + } + + if ((data & mask) != 0) + { + bits.SetBit(i); + } + + mask >>= 1; + } + + return bits; + } + + private BitVector ReadOptionalBitVector(int length) + { + byte allTrue = ReadByte(); + if (allTrue != 0) + { + return new BitVector(length, true); + } + + return ReadBitVector(length); + } + + private void ReadNumberVector(List dataVector, int numFiles, Action action) + { + var defined = ReadOptionalBitVector(numFiles); + + using (CStreamSwitch streamSwitch = new CStreamSwitch()) + { + streamSwitch.Set(this, dataVector); + + for (int i = 0; i < numFiles; i++) + { + if (defined[i]) + { + action(i, checked((long)ReadUInt64())); + } + else + { + action(i, null); + } + } + } + } + + private DateTime TranslateTime(long time) + { + // FILETIME = 100-nanosecond intervals since January 1, 1601 (UTC) + return DateTime.FromFileTimeUtc(time).ToLocalTime(); + } + + private DateTime? TranslateTime(long? 
time) + { + if (time.HasValue && time.Value >= 0 && time.Value <= 2650467743999999999) //maximum Windows file time 31.12.9999 + { + return TranslateTime(time.Value); + } + return null; + } + + private void ReadDateTimeVector(List dataVector, int numFiles, Action action) + { + ReadNumberVector(dataVector, numFiles, (index, value) => action(index, TranslateTime(value))); + } + + private void ReadAttributeVector(List dataVector, int numFiles, Action action) + { + BitVector boolVector = ReadOptionalBitVector(numFiles); + using (var streamSwitch = new CStreamSwitch()) + { + streamSwitch.Set(this, dataVector); + for (int i = 0; i < numFiles; i++) + { + if (boolVector[i]) + { + action(i, ReadUInt32()); + } + else + { + action(i, null); + } + } + } + } + + #endregion + + #region Private Methods + + private void GetNextFolderItem(CFolder folder) + { +#if DEBUG + Log.WriteLine("-- GetNextFolderItem --"); + Log.PushIndent(); +#endif + try + { + int numCoders = ReadNum(); +#if DEBUG + Log.WriteLine("NumCoders: " + numCoders); +#endif + folder._coders = new List(numCoders); + int numInStreams = 0; + int numOutStreams = 0; + for (int i = 0; i < numCoders; i++) + { +#if DEBUG + Log.WriteLine("-- Coder --"); + Log.PushIndent(); +#endif + try + { + CCoderInfo coder = new CCoderInfo(); + folder._coders.Add(coder); + + byte mainByte = ReadByte(); + int idSize = (mainByte & 0xF); + byte[] longId = new byte[idSize]; + ReadBytes(longId, 0, idSize); +#if DEBUG + Log.WriteLine("MethodId: " + String.Join("", Enumerable.Range(0, idSize).Select(x => longId[x].ToString("x2")).ToArray())); +#endif + if (idSize > 8) + { + throw new NotSupportedException(); + } + ulong id = 0; + for (int j = 0; j < idSize; j++) + { + id |= (ulong)longId[idSize - 1 - j] << (8 * j); + } + coder._methodId = new CMethodId(id); + + if ((mainByte & 0x10) != 0) + { + coder._numInStreams = ReadNum(); + coder._numOutStreams = ReadNum(); +#if DEBUG + Log.WriteLine("Complex Stream (In: " + coder._numInStreams + " - Out: " 
+ coder._numOutStreams + ")"); +#endif + } + else + { +#if DEBUG + Log.WriteLine("Simple Stream (In: 1 - Out: 1)"); +#endif + coder._numInStreams = 1; + coder._numOutStreams = 1; + } + + if ((mainByte & 0x20) != 0) + { + int propsSize = ReadNum(); + coder._props = new byte[propsSize]; + ReadBytes(coder._props, 0, propsSize); +#if DEBUG + Log.WriteLine("Settings: " + String.Join("", coder._props.Select(bt => bt.ToString("x2")).ToArray())); +#endif + } + + if ((mainByte & 0x80) != 0) + { + throw new NotSupportedException(); + } + + numInStreams += coder._numInStreams; + numOutStreams += coder._numOutStreams; + } + finally + { +#if DEBUG + Log.PopIndent(); +#endif + } + } + + int numBindPairs = numOutStreams - 1; + folder._bindPairs = new List(numBindPairs); +#if DEBUG + Log.WriteLine("BindPairs: " + numBindPairs); + Log.PushIndent(); +#endif + for (int i = 0; i < numBindPairs; i++) + { + CBindPair bp = new CBindPair(); + bp._inIndex = ReadNum(); + bp._outIndex = ReadNum(); + folder._bindPairs.Add(bp); +#if DEBUG + Log.WriteLine("#" + i + " - In: " + bp._inIndex + " - Out: " + bp._outIndex); +#endif + } +#if DEBUG + Log.PopIndent(); +#endif + + if (numInStreams < numBindPairs) + { + throw new NotSupportedException(); + } + + int numPackStreams = numInStreams - numBindPairs; + + //folder.PackStreams.Reserve(numPackStreams); + if (numPackStreams == 1) + { + for (int i = 0; i < numInStreams; i++) + { + if (folder.FindBindPairForInStream(i) < 0) + { +#if DEBUG + Log.WriteLine("Single PackStream: #" + i); +#endif + folder._packStreams.Add(i); + break; + } + } + + if (folder._packStreams.Count != 1) + { + throw new NotSupportedException(); + } + } + else + { +#if DEBUG + Log.WriteLine("Multiple PackStreams ..."); + Log.PushIndent(); +#endif + for (int i = 0; i < numPackStreams; i++) + { + var num = ReadNum(); +#if DEBUG + Log.WriteLine("#" + i + " - " + num); +#endif + folder._packStreams.Add(num); + } +#if DEBUG + Log.PopIndent(); +#endif + } + } + finally + { +#if DEBUG + 
Log.PopIndent(); +#endif + } + } + + private List ReadHashDigests(int count) + { +#if DEBUG + Log.Write("ReadHashDigests:"); +#endif + + var defined = ReadOptionalBitVector(count); + var digests = new List(count); + for (int i = 0; i < count; i++) + { + if (defined[i]) + { + uint crc = ReadUInt32(); +#if DEBUG + Log.Write(" " + crc.ToString("x8")); +#endif + digests.Add(crc); + } + else + { +#if DEBUG + Log.Write(" ########"); +#endif + digests.Add(null); + } + } +#if DEBUG + + Log.WriteLine(); +#endif + return digests; + } + + private void ReadPackInfo(out long dataOffset, out List packSizes, out List packCrCs) + { +#if DEBUG + Log.WriteLine("-- ReadPackInfo --"); + Log.PushIndent(); +#endif + try + { + packCrCs = null; + + dataOffset = checked((long)ReadNumber()); +#if DEBUG + Log.WriteLine("DataOffset: " + dataOffset); +#endif + + int numPackStreams = ReadNum(); +#if DEBUG + Log.WriteLine("NumPackStreams: " + numPackStreams); +#endif + + WaitAttribute(BlockType.Size); + packSizes = new List(numPackStreams); +#if DEBUG + Log.Write("Sizes:"); +#endif + for (int i = 0; i < numPackStreams; i++) + { + var size = checked((long)ReadNumber()); +#if DEBUG + Log.Write(" " + size); +#endif + packSizes.Add(size); + } +#if DEBUG + Log.WriteLine(); +#endif + + BlockType? 
type; + for (;;) + { + type = ReadId(); + if (type == BlockType.End) + { + break; + } + if (type == BlockType.Crc) + { + packCrCs = ReadHashDigests(numPackStreams); + continue; + } + SkipData(); + } + + if (packCrCs == null) + { + packCrCs = new List(numPackStreams); + for (int i = 0; i < numPackStreams; i++) + { + packCrCs.Add(null); + } + } + } + finally + { +#if DEBUG + Log.PopIndent(); +#endif + } + } + + private void ReadUnpackInfo(List dataVector, out List folders) + { +#if DEBUG + Log.WriteLine("-- ReadUnpackInfo --"); + Log.PushIndent(); +#endif + try + { + WaitAttribute(BlockType.Folder); + int numFolders = ReadNum(); +#if DEBUG + Log.WriteLine("NumFolders: {0}", numFolders); +#endif + + using (CStreamSwitch streamSwitch = new CStreamSwitch()) + { + streamSwitch.Set(this, dataVector); + + //folders.Clear(); + //folders.Reserve(numFolders); + folders = new List(numFolders); + int index = 0; + for (int i = 0; i < numFolders; i++) + { + var f = new CFolder {_firstPackStreamId = index}; + folders.Add(f); + GetNextFolderItem(f); + index += f._packStreams.Count; + } + } + + WaitAttribute(BlockType.CodersUnpackSize); +#if DEBUG + Log.WriteLine("UnpackSizes:"); +#endif + for (int i = 0; i < numFolders; i++) + { + CFolder folder = folders[i]; +#if DEBUG + Log.Write(" #" + i + ":"); +#endif + int numOutStreams = folder.GetNumOutStreams(); + for (int j = 0; j < numOutStreams; j++) + { + long size = checked((long)ReadNumber()); +#if DEBUG + Log.Write(" " + size); +#endif + folder._unpackSizes.Add(size); + } +#if DEBUG + Log.WriteLine(); +#endif + } + + for (;;) + { + BlockType? 
type = ReadId(); + if (type == BlockType.End) + { + return; + } + + if (type == BlockType.Crc) + { + List crcs = ReadHashDigests(numFolders); + for (int i = 0; i < numFolders; i++) + { + folders[i]._unpackCrc = crcs[i]; + } + continue; + } + + SkipData(); + } + } + finally + { +#if DEBUG + Log.PopIndent(); +#endif + } + } + + private void ReadSubStreamsInfo(List folders, out List numUnpackStreamsInFolders, + out List unpackSizes, out List digests) + { +#if DEBUG + Log.WriteLine("-- ReadSubStreamsInfo --"); + Log.PushIndent(); +#endif + try + { + numUnpackStreamsInFolders = null; + + BlockType? type; + for (;;) + { + type = ReadId(); + if (type == BlockType.NumUnpackStream) + { + numUnpackStreamsInFolders = new List(folders.Count); +#if DEBUG + Log.Write("NumUnpackStreams:"); +#endif + for (int i = 0; i < folders.Count; i++) + { + var num = ReadNum(); +#if DEBUG + Log.Write(" " + num); +#endif + numUnpackStreamsInFolders.Add(num); + } +#if DEBUG + Log.WriteLine(); +#endif + continue; + } + if (type == BlockType.Crc || type == BlockType.Size) + { + break; + } + if (type == BlockType.End) + { + break; + } + SkipData(); + } + + if (numUnpackStreamsInFolders == null) + { + numUnpackStreamsInFolders = new List(folders.Count); + for (int i = 0; i < folders.Count; i++) + { + numUnpackStreamsInFolders.Add(1); + } + } + + unpackSizes = new List(folders.Count); + for (int i = 0; i < numUnpackStreamsInFolders.Count; i++) + { + // v3.13 incorrectly worked with empty folders + // v4.07: we check that folder is empty + int numSubstreams = numUnpackStreamsInFolders[i]; + if (numSubstreams == 0) + { + continue; + } +#if DEBUG + Log.Write("#{0} StreamSizes:", i); +#endif + long sum = 0; + for (int j = 1; j < numSubstreams; j++) + { + if (type == BlockType.Size) + { + long size = checked((long)ReadNumber()); +#if DEBUG + Log.Write(" " + size); +#endif + unpackSizes.Add(size); + sum += size; + } + } + unpackSizes.Add(folders[i].GetUnpackSize() - sum); +#if DEBUG + Log.WriteLine(" - 
rest: " + unpackSizes.Last()); +#endif + } + if (type == BlockType.Size) + { + type = ReadId(); + } + + int numDigests = 0; + int numDigestsTotal = 0; + for (int i = 0; i < folders.Count; i++) + { + int numSubstreams = numUnpackStreamsInFolders[i]; + if (numSubstreams != 1 || !folders[i].UnpackCrcDefined) + { + numDigests += numSubstreams; + } + numDigestsTotal += numSubstreams; + } + + digests = null; + + for (;;) + { + if (type == BlockType.Crc) + { + digests = new List(numDigestsTotal); + + List digests2 = ReadHashDigests(numDigests); + + int digestIndex = 0; + for (int i = 0; i < folders.Count; i++) + { + int numSubstreams = numUnpackStreamsInFolders[i]; + CFolder folder = folders[i]; + if (numSubstreams == 1 && folder.UnpackCrcDefined) + { + digests.Add(folder._unpackCrc.Value); + } + else + { + for (int j = 0; j < numSubstreams; j++, digestIndex++) + { + digests.Add(digests2[digestIndex]); + } + } + } + + if (digestIndex != numDigests || numDigestsTotal != digests.Count) + { + Debugger.Break(); + } + } + else if (type == BlockType.End) + { + if (digests == null) + { + digests = new List(numDigestsTotal); + for (int i = 0; i < numDigestsTotal; i++) + { + digests.Add(null); + } + } + return; + } + else + { + SkipData(); + } + + type = ReadId(); + } + } + finally + { +#if DEBUG + Log.PopIndent(); +#endif + } + } + + private void ReadStreamsInfo( + List dataVector, + out long dataOffset, + out List packSizes, + out List packCrCs, + out List folders, + out List numUnpackStreamsInFolders, + out List unpackSizes, + out List digests) + { +#if DEBUG + Log.WriteLine("-- ReadStreamsInfo --"); + Log.PushIndent(); +#endif + try + { + dataOffset = long.MinValue; + packSizes = null; + packCrCs = null; + folders = null; + numUnpackStreamsInFolders = null; + unpackSizes = null; + digests = null; + + for (;;) + { + switch (ReadId()) + { + case BlockType.End: + return; + case BlockType.PackInfo: + ReadPackInfo(out dataOffset, out packSizes, out packCrCs); + break; + case 
BlockType.UnpackInfo: + ReadUnpackInfo(dataVector, out folders); + break; + case BlockType.SubStreamsInfo: + ReadSubStreamsInfo(folders, out numUnpackStreamsInFolders, out unpackSizes, out digests); + break; + default: + throw new InvalidOperationException(); + } + } + } + finally + { +#if DEBUG + Log.PopIndent(); +#endif + } + } + + private List ReadAndDecodePackedStreams(long baseOffset, IPasswordProvider pass) + { +#if DEBUG + Log.WriteLine("-- ReadAndDecodePackedStreams --"); + Log.PushIndent(); +#endif + try + { + long dataStartPos; + List packSizes; + List packCrCs; + List folders; + List numUnpackStreamsInFolders; + List unpackSizes; + List digests; + + ReadStreamsInfo(null, + out dataStartPos, + out packSizes, + out packCrCs, + out folders, + out numUnpackStreamsInFolders, + out unpackSizes, + out digests); + + dataStartPos += baseOffset; + + var dataVector = new List(folders.Count); + int packIndex = 0; + foreach (var folder in folders) + { + long oldDataStartPos = dataStartPos; + long[] myPackSizes = new long[folder._packStreams.Count]; + for (int i = 0; i < myPackSizes.Length; i++) + { + long packSize = packSizes[packIndex + i]; + myPackSizes[i] = packSize; + dataStartPos += packSize; + } + + var outStream = DecoderStreamHelper.CreateDecoderStream(_stream, oldDataStartPos, myPackSizes, + folder, pass); + + int unpackSize = checked((int)folder.GetUnpackSize()); + byte[] data = new byte[unpackSize]; + outStream.ReadExact(data, 0, data.Length); + if (outStream.ReadByte() >= 0) + { + throw new InvalidOperationException("Decoded stream is longer than expected."); + } + dataVector.Add(data); + + if (folder.UnpackCrcDefined) + { + if (Crc.Finish(Crc.Update(Crc.INIT_CRC, data, 0, unpackSize)) != folder._unpackCrc) + { + throw new InvalidOperationException("Decoded stream does not match expected CRC."); + } + } + } + return dataVector; + } + finally + { +#if DEBUG + Log.PopIndent(); +#endif + } + } + + private void ReadHeader(ArchiveDatabase db, IPasswordProvider 
getTextPassword) + { +#if DEBUG + Log.WriteLine("-- ReadHeader --"); + Log.PushIndent(); +#endif + try + { + BlockType? type = ReadId(); + + if (type == BlockType.ArchiveProperties) + { + ReadArchiveProperties(); + type = ReadId(); + } + + List dataVector = null; + if (type == BlockType.AdditionalStreamsInfo) + { + dataVector = ReadAndDecodePackedStreams(db._startPositionAfterHeader, getTextPassword); + type = ReadId(); + } + + List unpackSizes; + List digests; + + if (type == BlockType.MainStreamsInfo) + { + ReadStreamsInfo(dataVector, + out db._dataStartPosition, + out db._packSizes, + out db._packCrCs, + out db._folders, + out db._numUnpackStreamsVector, + out unpackSizes, + out digests); + + db._dataStartPosition += db._startPositionAfterHeader; + type = ReadId(); + } + else + { + unpackSizes = new List(db._folders.Count); + digests = new List(db._folders.Count); + db._numUnpackStreamsVector = new List(db._folders.Count); + for (int i = 0; i < db._folders.Count; i++) + { + var folder = db._folders[i]; + unpackSizes.Add(folder.GetUnpackSize()); + digests.Add(folder._unpackCrc); + db._numUnpackStreamsVector.Add(1); + } + } + + db._files.Clear(); + + if (type == BlockType.End) + { + return; + } + + if (type != BlockType.FilesInfo) + { + throw new InvalidOperationException(); + } + + int numFiles = ReadNum(); +#if DEBUG + Log.WriteLine("NumFiles: " + numFiles); +#endif + db._files = new List(numFiles); + for (int i = 0; i < numFiles; i++) + { + db._files.Add(new CFileItem()); + } + + BitVector emptyStreamVector = new BitVector(numFiles); + BitVector emptyFileVector = null; + BitVector antiFileVector = null; + int numEmptyStreams = 0; + + for (;;) + { + type = ReadId(); + if (type == BlockType.End) + { + break; + } + + long size = checked((long)ReadNumber()); // TODO: throw invalid data on negative + int oldPos = _currentReader.Offset; + switch (type) + { + case BlockType.Name: + using (var streamSwitch = new CStreamSwitch()) + { + streamSwitch.Set(this, 
dataVector); +#if DEBUG + Log.Write("FileNames:"); +#endif + for (int i = 0; i < db._files.Count; i++) + { + db._files[i].Name = _currentReader.ReadString(); +#if DEBUG + Log.Write(" " + db._files[i].Name); +#endif + } +#if DEBUG + Log.WriteLine(); +#endif + } + break; + case BlockType.WinAttributes: +#if DEBUG + Log.Write("WinAttributes:"); +#endif + ReadAttributeVector(dataVector, numFiles, delegate(int i, uint? attr) + { + // Some third party implementations established an unofficial extension + // of the 7z archive format by placing posix file attributes in the high + // bits of the windows file attributes. This makes use of the fact that + // the official implementation does not perform checks on this value. + // + // Newer versions of the official 7z GUI client will try to parse this + // extension, thus acknowledging the unofficial use of these bits. + // + // For us it is safe to just discard the upper bits if they are set and + // keep the windows attributes from the lower bits (which should be set + // properly even if posix file attributes are present, in order to be + // compatible with older 7z archive readers) + // + // Note that the 15th bit is used by some implementations to indicate + // presence of the extension, but not all implementations do that so + // we can't trust that bit and must ignore it. + // + if (attr.HasValue && (attr.Value >> 16) != 0) + { + attr = attr.Value & 0x7FFFu; + } + + db._files[i].Attrib = attr; +#if DEBUG + Log.Write(" " + (attr.HasValue ? 
attr.Value.ToString("x8") : "n/a")); +#endif + }); +#if DEBUG + Log.WriteLine(); +#endif + break; + case BlockType.EmptyStream: + emptyStreamVector = ReadBitVector(numFiles); +#if DEBUG + + Log.Write("EmptyStream: "); +#endif + for (int i = 0; i < emptyStreamVector.Length; i++) + { + if (emptyStreamVector[i]) + { +#if DEBUG + Log.Write("x"); +#endif + numEmptyStreams++; + } + else + { +#if DEBUG + Log.Write("."); +#endif + } + } +#if DEBUG + Log.WriteLine(); +#endif + + emptyFileVector = new BitVector(numEmptyStreams); + antiFileVector = new BitVector(numEmptyStreams); + break; + case BlockType.EmptyFile: + emptyFileVector = ReadBitVector(numEmptyStreams); +#if DEBUG + Log.Write("EmptyFile: "); + for (int i = 0; i < numEmptyStreams; i++) + { + Log.Write(emptyFileVector[i] ? "x" : "."); + } + Log.WriteLine(); +#endif + break; + case BlockType.Anti: + antiFileVector = ReadBitVector(numEmptyStreams); +#if DEBUG + Log.Write("Anti: "); + for (int i = 0; i < numEmptyStreams; i++) + { + Log.Write(antiFileVector[i] ? "x" : "."); + } + Log.WriteLine(); +#endif + break; + case BlockType.StartPos: +#if DEBUG + Log.Write("StartPos:"); +#endif + ReadNumberVector(dataVector, numFiles, delegate(int i, long? startPos) + { + db._files[i].StartPos = startPos; +#if DEBUG + Log.Write(" " + (startPos.HasValue ? startPos.Value.ToString() : "n/a")); +#endif + }); +#if DEBUG + Log.WriteLine(); +#endif + break; + case BlockType.CTime: +#if DEBUG + Log.Write("CTime:"); +#endif + ReadDateTimeVector(dataVector, numFiles, delegate(int i, DateTime? time) + { + db._files[i].CTime = time; +#if DEBUG + Log.Write(" " + (time.HasValue ? time.Value.ToString() : "n/a")); +#endif + }); +#if DEBUG + Log.WriteLine(); +#endif + break; + case BlockType.ATime: +#if DEBUG + Log.Write("ATime:"); +#endif + ReadDateTimeVector(dataVector, numFiles, delegate(int i, DateTime? time) + { + db._files[i].ATime = time; +#if DEBUG + Log.Write(" " + (time.HasValue ? 
time.Value.ToString() : "n/a")); +#endif + }); +#if DEBUG + Log.WriteLine(); +#endif + break; + case BlockType.MTime: +#if DEBUG + Log.Write("MTime:"); +#endif + ReadDateTimeVector(dataVector, numFiles, delegate(int i, DateTime? time) + { + db._files[i].MTime = time; +#if DEBUG + Log.Write(" " + (time.HasValue ? time.Value.ToString() : "n/a")); +#endif + }); +#if DEBUG + Log.WriteLine(); +#endif + break; + case BlockType.Dummy: +#if DEBUG + Log.Write("Dummy: " + size); +#endif + for (long j = 0; j < size; j++) + { + if (ReadByte() != 0) + { + throw new InvalidOperationException(); + } + } + break; + default: + SkipData(size); + break; + } + + // since 0.3 record sizes must be correct + bool checkRecordsSize = (db._majorVersion > 0 || db._minorVersion > 2); + if (checkRecordsSize && _currentReader.Offset - oldPos != size) + { + throw new InvalidOperationException(); + } + } + + int emptyFileIndex = 0; + int sizeIndex = 0; + for (int i = 0; i < numFiles; i++) + { + CFileItem file = db._files[i]; + file.HasStream = !emptyStreamVector[i]; + if (file.HasStream) + { + file.IsDir = false; + file.IsAnti = false; + file.Size = unpackSizes[sizeIndex]; + file.Crc = digests[sizeIndex]; + sizeIndex++; + } + else + { + file.IsDir = !emptyFileVector[emptyFileIndex]; + file.IsAnti = antiFileVector[emptyFileIndex]; + emptyFileIndex++; + file.Size = 0; + file.Crc = null; + } + } + } + finally + { +#if DEBUG + Log.PopIndent(); +#endif + } + } + + #endregion + + #region Public Methods + + public void Open(Stream stream) + { + Close(); + + _streamOrigin = stream.Position; + _streamEnding = stream.Length; + + // TODO: Check Signature! 
+ _header = new byte[0x20]; + for (int offset = 0; offset < 0x20;) + { + int delta = stream.Read(_header, offset, 0x20 - offset); + if (delta == 0) + { + throw new EndOfStreamException(); + } + offset += delta; + } + + _stream = stream; + } + + public void Close() + { + if (_stream != null) + { + _stream.Dispose(); + } + + foreach (var stream in _cachedStreams.Values) + { + stream.Dispose(); + } + + _cachedStreams.Clear(); + } + + public ArchiveDatabase ReadDatabase(IPasswordProvider pass) + { + var db = new ArchiveDatabase(pass); + db.Clear(); + + db._majorVersion = _header[6]; + db._minorVersion = _header[7]; + + if (db._majorVersion != 0) + { + throw new InvalidOperationException(); + } + + uint crcFromArchive = DataReader.Get32(_header, 8); + long nextHeaderOffset = (long)DataReader.Get64(_header, 0xC); + long nextHeaderSize = (long)DataReader.Get64(_header, 0x14); + uint nextHeaderCrc = DataReader.Get32(_header, 0x1C); + + uint crc = Crc.INIT_CRC; + crc = Crc.Update(crc, nextHeaderOffset); + crc = Crc.Update(crc, nextHeaderSize); + crc = Crc.Update(crc, nextHeaderCrc); + crc = Crc.Finish(crc); + + if (crc != crcFromArchive) + { + throw new InvalidOperationException(); + } + + db._startPositionAfterHeader = _streamOrigin + 0x20; + + // empty header is ok + if (nextHeaderSize == 0) + { + db.Fill(); + return db; + } + + if (nextHeaderOffset < 0 || nextHeaderSize < 0 || nextHeaderSize > Int32.MaxValue) + { + throw new InvalidOperationException(); + } + + if (nextHeaderOffset > _streamEnding - db._startPositionAfterHeader) + { + throw new IndexOutOfRangeException(); + } + + _stream.Seek(nextHeaderOffset, SeekOrigin.Current); + + byte[] header = new byte[nextHeaderSize]; + _stream.ReadExact(header, 0, header.Length); + + if (Crc.Finish(Crc.Update(Crc.INIT_CRC, header, 0, header.Length)) != nextHeaderCrc) + { + throw new InvalidOperationException(); + } + + using (CStreamSwitch streamSwitch = new CStreamSwitch()) + { + streamSwitch.Set(this, header); + + BlockType? 
type = ReadId(); + if (type != BlockType.Header) + { + if (type != BlockType.EncodedHeader) + { + throw new InvalidOperationException(); + } + + var dataVector = ReadAndDecodePackedStreams(db._startPositionAfterHeader, db.PasswordProvider); + + // compressed header without content is odd but ok + if (dataVector.Count == 0) + { + db.Fill(); + return db; + } + + if (dataVector.Count != 1) + { + throw new InvalidOperationException(); + } + + streamSwitch.Set(this, dataVector[0]); + + if (ReadId() != BlockType.Header) + { + throw new InvalidOperationException(); + } + } + + ReadHeader(db, db.PasswordProvider); + } + db.Fill(); + return db; + } + + internal class CExtractFolderInfo + { + internal int _fileIndex; + internal int _folderIndex; + internal List _extractStatuses = new List(); + + internal CExtractFolderInfo(int fileIndex, int folderIndex) + { + _fileIndex = fileIndex; + _folderIndex = folderIndex; + if (fileIndex != -1) + { + _extractStatuses.Add(true); + } + } + } + + private class FolderUnpackStream : Stream + { + private readonly ArchiveDatabase _db; + private readonly int _startIndex; + private readonly List _extractStatuses; + + public FolderUnpackStream(ArchiveDatabase db, int p, int startIndex, List list) + { + _db = db; + _startIndex = startIndex; + _extractStatuses = list; + } + + #region Stream + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + 
{ + throw new NotSupportedException(); + } + + private Stream _stream; + private long _rem; + private int _currentIndex; + + private void ProcessEmptyFiles() + { + while (_currentIndex < _extractStatuses.Count && _db._files[_startIndex + _currentIndex].Size == 0) + { + OpenFile(); + _stream.Dispose(); + _stream = null; + _currentIndex++; + } + } + + private void OpenFile() + { + int index = _startIndex + _currentIndex; +#if DEBUG + Log.WriteLine(_db._files[index].Name); +#endif + if (_db._files[index].CrcDefined) + { + _stream = new CrcCheckStream(_db._files[index].Crc.Value); + } + else + { + _stream = new MemoryStream(); + } + _rem = _db._files[index].Size; + } + + public override void Write(byte[] buffer, int offset, int count) + { + while (count != 0) + { + if (_stream != null) + { + int write = count; + if (write > _rem) + { + write = (int)_rem; + } + _stream.Write(buffer, offset, write); + count -= write; + _rem -= write; + offset += write; + if (_rem == 0) + { + _stream.Dispose(); + _stream = null; + _currentIndex++; + ProcessEmptyFiles(); + } + } + else + { + ProcessEmptyFiles(); + if (_currentIndex == _extractStatuses.Count) + { + // we support partial extracting + Debugger.Break(); + throw new NotSupportedException(); + } + OpenFile(); + } + } + } + + #endregion + } + + private Stream GetCachedDecoderStream(ArchiveDatabase db, int folderIndex) + { + Stream s; + if (!_cachedStreams.TryGetValue(folderIndex, out s)) + { + CFolder folderInfo = db._folders[folderIndex]; + int packStreamIndex = db._folders[folderIndex]._firstPackStreamId; + long folderStartPackPos = db.GetFolderStreamPos(folderInfo, 0); + List packSizes = new List(); + for (int j = 0; j < folderInfo._packStreams.Count; j++) + { + packSizes.Add(db._packSizes[packStreamIndex + j]); + } + + s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(), folderInfo, + db.PasswordProvider); + _cachedStreams.Add(folderIndex, s); + } + return s; + } + + public Stream 
OpenStream(ArchiveDatabase db, int fileIndex) + { + int folderIndex = db._fileIndexToFolderIndexMap[fileIndex]; + int numFilesInFolder = db._numUnpackStreamsVector[folderIndex]; + int firstFileIndex = db._folderStartFileIndex[folderIndex]; + if (firstFileIndex > fileIndex || fileIndex - firstFileIndex >= numFilesInFolder) + { + throw new InvalidOperationException(); + } + + int skipCount = fileIndex - firstFileIndex; + long skipSize = 0; + for (int i = 0; i < skipCount; i++) + { + skipSize += db._files[firstFileIndex + i].Size; + } + + Stream s = GetCachedDecoderStream(db, folderIndex); + s.Position = skipSize; + return new ReadOnlySubStream(s, db._files[fileIndex].Size); + } + + public void Extract(ArchiveDatabase db, int[] indices) + { + int numItems; + bool allFilesMode = (indices == null); + if (allFilesMode) + { + numItems = db._files.Count; + } + else + { + numItems = indices.Length; + } + + if (numItems == 0) + { + return; + } + + List extractFolderInfoVector = new List(); + for (int i = 0; i < numItems; i++) + { + int fileIndex = allFilesMode ? 
i : indices[i]; + + int folderIndex = db._fileIndexToFolderIndexMap[fileIndex]; + if (folderIndex == -1) + { + extractFolderInfoVector.Add(new CExtractFolderInfo(fileIndex, -1)); + continue; + } + + if (extractFolderInfoVector.Count == 0 || folderIndex != extractFolderInfoVector.Last()._folderIndex) + { + extractFolderInfoVector.Add(new CExtractFolderInfo(-1, folderIndex)); + } + + CExtractFolderInfo efi = extractFolderInfoVector.Last(); + + int startIndex = db._folderStartFileIndex[folderIndex]; + for (int index = efi._extractStatuses.Count; index <= fileIndex - startIndex; index++) + { + efi._extractStatuses.Add(index == fileIndex - startIndex); + } + } + + foreach (CExtractFolderInfo efi in extractFolderInfoVector) + { + int startIndex; + if (efi._fileIndex != -1) + { + startIndex = efi._fileIndex; + } + else + { + startIndex = db._folderStartFileIndex[efi._folderIndex]; + } + + var outStream = new FolderUnpackStream(db, 0, startIndex, efi._extractStatuses); + + if (efi._fileIndex != -1) + { + continue; + } + + int folderIndex = efi._folderIndex; + CFolder folderInfo = db._folders[folderIndex]; + + int packStreamIndex = db._folders[folderIndex]._firstPackStreamId; + long folderStartPackPos = db.GetFolderStreamPos(folderInfo, 0); + + List packSizes = new List(); + for (int j = 0; j < folderInfo._packStreams.Count; j++) + { + packSizes.Add(db._packSizes[packStreamIndex + j]); + } + + // TODO: If the decoding fails the last file may be extracted incompletely. Delete it? 
+ + Stream s = DecoderStreamHelper.CreateDecoderStream(_stream, folderStartPackPos, packSizes.ToArray(), + folderInfo, db.PasswordProvider); + byte[] buffer = new byte[4 << 10]; + for (;;) + { + int processed = s.Read(buffer, 0, buffer.Length); + if (processed == 0) + { + break; + } + outStream.Write(buffer, 0, processed); + } + } + } + + public IEnumerable GetFiles(ArchiveDatabase db) + { + return db._files; + } + + public int GetFileIndex(ArchiveDatabase db, CFileItem item) + { + return db._files.IndexOf(item); + } + + #endregion + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CBindPair.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CBindPair.cs new file mode 100644 index 0000000000..ad947967a4 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CBindPair.cs @@ -0,0 +1,8 @@ +namespace SharpCompress.Common.SevenZip +{ + internal class CBindPair + { + internal int _inIndex; + internal int _outIndex; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CCoderInfo.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CCoderInfo.cs new file mode 100644 index 0000000000..035722991f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CCoderInfo.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Common.SevenZip +{ + internal class CCoderInfo + { + internal CMethodId _methodId; + internal byte[] _props; + internal int _numInStreams; + internal int _numOutStreams; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFileItem.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFileItem.cs new file mode 100644 index 0000000000..450cbdfc5c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFileItem.cs @@ -0,0 +1,36 @@ +using System; + +namespace SharpCompress.Common.SevenZip +{ + internal class CFileItem + { + public long Size { get; internal set; } + public uint? 
Attrib { get; internal set; } + public uint? Crc { get; internal set; } + public string Name { get; internal set; } + + public bool HasStream { get; internal set; } + public bool IsDir { get; internal set; } + + public bool CrcDefined => Crc != null; + + public bool AttribDefined => Attrib != null; + + public void SetAttrib(uint attrib) + { + Attrib = attrib; + } + + public DateTime? CTime { get; internal set; } + public DateTime? ATime { get; internal set; } + public DateTime? MTime { get; internal set; } + + public long? StartPos { get; internal set; } + public bool IsAnti { get; internal set; } + + internal CFileItem() + { + HasStream = true; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFolder.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFolder.cs new file mode 100644 index 0000000000..4a68cf505e --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CFolder.cs @@ -0,0 +1,188 @@ +using System; +using System.Collections.Generic; +using SharpCompress.Compressors.LZMA; + +namespace SharpCompress.Common.SevenZip +{ + internal class CFolder + { + internal List _coders = new List(); + internal List _bindPairs = new List(); + internal List _packStreams = new List(); + internal int _firstPackStreamId; + internal List _unpackSizes = new List(); + internal uint? 
using System;
using System.Collections.Generic;
using SharpCompress.Compressors.LZMA;

namespace SharpCompress.Common.SevenZip
{
    /// <summary>
    /// A 7z "folder": a set of coders, bind pairs and pack streams that together
    /// decode one contiguous run of packed data (a solid block).
    /// </summary>
    internal class CFolder
    {
        internal List<CCoderInfo> _coders = new List<CCoderInfo>();
        internal List<CBindPair> _bindPairs = new List<CBindPair>();
        internal List<int> _packStreams = new List<int>();
        internal int _firstPackStreamId;
        internal List<long> _unpackSizes = new List<long>();
        internal uint? _unpackCrc;

        internal bool UnpackCrcDefined => _unpackCrc != null;

        /// <summary>Size of the folder's final (unbound) output stream.</summary>
        public long GetUnpackSize()
        {
            if (_unpackSizes.Count == 0)
            {
                return 0;
            }

            // The folder's overall output is the last out-stream not consumed
            // by a bind pair.
            for (int idx = _unpackSizes.Count - 1; idx >= 0; idx--)
            {
                if (FindBindPairForOutStream(idx) < 0)
                {
                    return _unpackSizes[idx];
                }
            }

            throw new Exception();
        }

        public int GetNumOutStreams()
        {
            int total = 0;
            foreach (CCoderInfo coder in _coders)
            {
                total += coder._numOutStreams;
            }
            return total;
        }

        public int FindBindPairForInStream(int inStreamIndex)
        {
            for (int idx = 0; idx < _bindPairs.Count; idx++)
            {
                if (_bindPairs[idx]._inIndex == inStreamIndex)
                {
                    return idx;
                }
            }
            return -1;
        }

        public int FindBindPairForOutStream(int outStreamIndex)
        {
            for (int idx = 0; idx < _bindPairs.Count; idx++)
            {
                if (_bindPairs[idx]._outIndex == outStreamIndex)
                {
                    return idx;
                }
            }
            return -1;
        }

        public int FindPackStreamArrayIndex(int inStreamIndex)
        {
            for (int idx = 0; idx < _packStreams.Count; idx++)
            {
                if (_packStreams[idx] == inStreamIndex)
                {
                    return idx;
                }
            }
            return -1;
        }

        public bool IsEncrypted()
        {
            // AES appears last in the coder chain when encryption is in use.
            for (int idx = _coders.Count - 1; idx >= 0; idx--)
            {
                if (_coders[idx]._methodId == CMethodId.K_AES)
                {
                    return true;
                }
            }
            return false;
        }

        /// <summary>
        /// Validates the coder/bind-pair graph: bounded counts, no duplicate
        /// bindings and no dependency cycles between coders.
        /// </summary>
        public bool CheckStructure()
        {
            const int kNumCodersMax = 32; // don't change it
            const int kMaskSize = 32; // it must be >= kNumCodersMax
            const int kNumBindsMax = 32;

            if (_coders.Count > kNumCodersMax || _bindPairs.Count > kNumBindsMax)
            {
                return false;
            }

            {
                // Each in-stream may be fed by at most one bind pair or pack stream.
                var seen = new BitVector(_bindPairs.Count + _packStreams.Count);

                foreach (CBindPair bp in _bindPairs)
                {
                    if (seen.GetAndSet(bp._inIndex))
                    {
                        return false;
                    }
                }

                foreach (int packStream in _packStreams)
                {
                    if (seen.GetAndSet(packStream))
                    {
                        return false;
                    }
                }
            }

            {
                // Each out-stream may be consumed by at most one bind pair.
                var seen = new BitVector(_unpackSizes.Count);
                foreach (CBindPair bp in _bindPairs)
                {
                    if (seen.GetAndSet(bp._outIndex))
                    {
                        return false;
                    }
                }
            }

            uint[] mask = new uint[kMaskSize];

            {
                // Map stream indices back to their owning coder, then record the
                // coder-to-coder dependencies introduced by the bind pairs.
                var inStreamToCoder = new List<int>();
                var outStreamToCoder = new List<int>();
                for (int coderIndex = 0; coderIndex < _coders.Count; coderIndex++)
                {
                    CCoderInfo coder = _coders[coderIndex];
                    for (int j = 0; j < coder._numInStreams; j++)
                    {
                        inStreamToCoder.Add(coderIndex);
                    }
                    for (int j = 0; j < coder._numOutStreams; j++)
                    {
                        outStreamToCoder.Add(coderIndex);
                    }
                }

                foreach (CBindPair bp in _bindPairs)
                {
                    mask[inStreamToCoder[bp._inIndex]] |= (1u << outStreamToCoder[bp._outIndex]);
                }
            }

            // Propagate dependencies through the 32x32 bit matrix.
            for (int i = 0; i < kMaskSize; i++)
            {
                for (int j = 0; j < kMaskSize; j++)
                {
                    if (((1u << j) & mask[i]) != 0)
                    {
                        mask[i] |= mask[j];
                    }
                }
            }

            // A coder depending on itself means the binding graph has a cycle.
            for (int i = 0; i < kMaskSize; i++)
            {
                if (((1u << i) & mask[i]) != 0)
                {
                    return false;
                }
            }

            return true;
        }
    }
}
this; + } + + public bool Equals(CMethodId other) + { + return _id == other._id; + } + + public static bool operator ==(CMethodId left, CMethodId right) + { + return left._id == right._id; + } + + public static bool operator !=(CMethodId left, CMethodId right) + { + return left._id != right._id; + } + + public int GetLength() + { + int bytes = 0; + for (ulong value = _id; value != 0; value >>= 8) + { + bytes++; + } + return bytes; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CStreamSwitch.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CStreamSwitch.cs new file mode 100644 index 0000000000..c1cd51c9b4 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/CStreamSwitch.cs @@ -0,0 +1,69 @@ +using System; +using System.Collections.Generic; +using SharpCompress.Compressors.LZMA; + +namespace SharpCompress.Common.SevenZip +{ + internal struct CStreamSwitch : IDisposable + { + private ArchiveReader _archive; + private bool _needRemove; + private bool _active; + + public void Dispose() + { + if (_active) + { + _active = false; +#if DEBUG + Log.WriteLine("[end of switch]"); +#endif + } + + if (_needRemove) + { + _needRemove = false; + _archive.DeleteByteStream(); + } + } + + public void Set(ArchiveReader archive, byte[] dataVector) + { + Dispose(); + _archive = archive; + _archive.AddByteStream(dataVector, 0, dataVector.Length); + _needRemove = true; + _active = true; + } + + public void Set(ArchiveReader archive, List dataVector) + { + Dispose(); + _active = true; + + byte external = archive.ReadByte(); + if (external != 0) + { + int dataIndex = archive.ReadNum(); + if (dataIndex < 0 || dataIndex >= dataVector.Count) + { + throw new InvalidOperationException(); + } + +#if DEBUG + Log.WriteLine("[switch to stream {0}]", dataIndex); +#endif + _archive = archive; + _archive.AddByteStream(dataVector[dataIndex], 0, dataVector[dataIndex].Length); + _needRemove = true; + _active = true; + } + 
else + { +#if DEBUG + Log.WriteLine("[inline data]"); +#endif + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/DataReader.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/DataReader.cs new file mode 100644 index 0000000000..910814d64e --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/DataReader.cs @@ -0,0 +1,186 @@ +using System; +using System.IO; +using System.Text; +using SharpCompress.Compressors.LZMA; + +namespace SharpCompress.Common.SevenZip +{ + internal class DataReader + { + #region Static Methods + + public static uint Get32(byte[] buffer, int offset) + { + return buffer[offset] + + ((uint)buffer[offset + 1] << 8) + + ((uint)buffer[offset + 2] << 16) + + ((uint)buffer[offset + 3] << 24); + } + + public static ulong Get64(byte[] buffer, int offset) + { + return buffer[offset] + + ((ulong)buffer[offset + 1] << 8) + + ((ulong)buffer[offset + 2] << 16) + + ((ulong)buffer[offset + 3] << 24) + + ((ulong)buffer[offset + 4] << 32) + + ((ulong)buffer[offset + 5] << 40) + + ((ulong)buffer[offset + 6] << 48) + + ((ulong)buffer[offset + 7] << 56); + } + + #endregion + + #region Variables + + private readonly byte[] _buffer; + private readonly int _ending; + + #endregion + + #region Public Methods + + public DataReader(byte[] buffer, int offset, int length) + { + _buffer = buffer; + Offset = offset; + _ending = offset + length; + } + + public int Offset { get; private set; } + + public Byte ReadByte() + { + if (Offset >= _ending) + { + throw new EndOfStreamException(); + } + + return _buffer[Offset++]; + } + + public void ReadBytes(byte[] buffer, int offset, int length) + { + if (length > _ending - Offset) + { + throw new EndOfStreamException(); + } + + while (length-- > 0) + { + buffer[offset++] = _buffer[Offset++]; + } + } + + public void SkipData(long size) + { + if (size > _ending - Offset) + { + throw new EndOfStreamException(); + } + + Offset += (int)size; +#if DEBUG + 
Log.WriteLine("SkipData {0}", size); +#endif + } + + public void SkipData() + { + SkipData(checked((long)ReadNumber())); + } + + public ulong ReadNumber() + { + if (Offset >= _ending) + { + throw new EndOfStreamException(); + } + + byte firstByte = _buffer[Offset++]; + byte mask = 0x80; + ulong value = 0; + + for (int i = 0; i < 8; i++) + { + if ((firstByte & mask) == 0) + { + ulong highPart = firstByte & (mask - 1u); + value += highPart << (i * 8); + return value; + } + + if (Offset >= _ending) + { + throw new EndOfStreamException(); + } + + value |= (ulong)_buffer[Offset++] << (8 * i); + mask >>= 1; + } + + return value; + } + + public int ReadNum() + { + ulong value = ReadNumber(); + if (value > Int32.MaxValue) + { + throw new NotSupportedException(); + } + + return (int)value; + } + + public uint ReadUInt32() + { + if (Offset + 4 > _ending) + { + throw new EndOfStreamException(); + } + + uint res = Get32(_buffer, Offset); + Offset += 4; + return res; + } + + public ulong ReadUInt64() + { + if (Offset + 8 > _ending) + { + throw new EndOfStreamException(); + } + + ulong res = Get64(_buffer, Offset); + Offset += 8; + return res; + } + + public string ReadString() + { + int ending = Offset; + + for (;;) + { + if (ending + 2 > _ending) + { + throw new EndOfStreamException(); + } + + if (_buffer[ending] == 0 && _buffer[ending + 1] == 0) + { + break; + } + + ending += 2; + } + + string str = Encoding.Unicode.GetString(_buffer, Offset, ending - Offset); + Offset = ending + 2; + return str; + } + + #endregion + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipEntry.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipEntry.cs new file mode 100644 index 0000000000..c2ea2efe82 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipEntry.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; + +namespace SharpCompress.Common.SevenZip +{ + public class SevenZipEntry 
: Entry + { + internal SevenZipEntry(SevenZipFilePart filePart) + { + FilePart = filePart; + } + + internal SevenZipFilePart FilePart { get; } + + public override CompressionType CompressionType => FilePart.CompressionType; + + public override long Crc => FilePart.Header.Crc ?? 0; + + public override string Key => FilePart.Header.Name; + + public override string LinkTarget => null; + + public override long CompressedSize => 0; + + public override long Size => FilePart.Header.Size; + + public override DateTime? LastModifiedTime => FilePart.Header.MTime; + + public override DateTime? CreatedTime => null; + + public override DateTime? LastAccessedTime => null; + + public override DateTime? ArchivedTime => null; + + public override bool IsEncrypted => false; + + public override bool IsDirectory => FilePart.Header.IsDir; + + public override bool IsSplitAfter => false; + + public override int? Attrib => (int)FilePart.Header.Attrib; + + internal override IEnumerable Parts => FilePart.AsEnumerable(); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipFilePart.cs b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipFilePart.cs new file mode 100644 index 0000000000..3c5232b222 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/SevenZip/SevenZipFilePart.cs @@ -0,0 +1,106 @@ +using System; +using System.IO; +using System.Linq; +using SharpCompress.IO; + +namespace SharpCompress.Common.SevenZip +{ + internal class SevenZipFilePart : FilePart + { + private CompressionType? 
using System;
using System.IO;
using System.Linq;
using SharpCompress.IO;
using SharpCompress.Archives;
using SharpCompress.Readers;

namespace SharpCompress.Common.SevenZip
{
    /// <summary>
    /// One file's view into a 7z archive: knows its header, the folder (solid
    /// block) containing it, and how to open a decoded stream positioned at it.
    /// </summary>
    internal class SevenZipFilePart : FilePart
    {
        private CompressionType? _type; // lazily resolved from the folder's coder chain
        private readonly Stream _stream;
        private readonly ArchiveDatabase _database;

        internal SevenZipFilePart(Stream stream, ArchiveDatabase database, int index, CFileItem fileEntry, ArchiveEncoding archiveEncoding)
            : base(archiveEncoding)
        {
            _stream = stream;
            _database = database;
            Index = index;
            Header = fileEntry;
            if (Header.HasStream)
            {
                Folder = database._folders[database._fileIndexToFolderIndexMap[index]];
            }
        }

        internal CFileItem Header { get; }
        internal CFolder Folder { get; }
        internal int Index { get; }

        internal override string FilePartName => Header.Name;

        internal override Stream GetRawStream() => null;

        /// <summary>
        /// Opens the folder's decoded stream and advances past the earlier
        /// entries that share the same solid block before exposing this one.
        /// </summary>
        internal override Stream GetCompressedStream()
        {
            if (!Header.HasStream)
            {
                return null;
            }

            var folderStream = _database.GetFolderStream(_stream, Folder, _database.PasswordProvider);

            int firstFileIndex = _database._folderStartFileIndex[_database._folders.IndexOf(Folder)];
            int skipCount = Index - firstFileIndex;
            long skipSize = 0;
            for (int i = 0; i < skipCount; i++)
            {
                skipSize += _database._files[firstFileIndex + i].Size;
            }
            if (skipSize > 0)
            {
                folderStream.Skip(skipSize);
            }
            return new ReadOnlySubStream(folderStream, Header.Size);
        }

        public CompressionType CompressionType
        {
            get
            {
                if (_type == null)
                {
                    _type = GetCompression();
                }
                return _type.Value;
            }
        }

        // Codec ids copied from DecoderRegistry.
        private const uint K_COPY = 0x0;
        private const uint K_DELTA = 3;
        private const uint K_LZMA2 = 0x21;
        private const uint K_LZMA = 0x030101;
        private const uint K_PPMD = 0x030401;
        private const uint K_BCJ = 0x03030103;
        private const uint K_BCJ2 = 0x0303011B;
        private const uint K_DEFLATE = 0x040108;
        private const uint K_B_ZIP2 = 0x040202;

        internal CompressionType GetCompression()
        {
            var coder = Folder._coders.First();
            switch (coder._methodId._id)
            {
                case K_LZMA:
                case K_LZMA2:
                    return CompressionType.LZMA;
                case K_PPMD:
                    return CompressionType.PPMd;
                case K_B_ZIP2:
                    return CompressionType.BZip2;
                default:
                    throw new NotImplementedException();
            }
        }
    }

    /// <summary>Single-file volume wrapper for 7z archives.</summary>
    public class SevenZipVolume : Volume
    {
        public SevenZipVolume(Stream stream, ReaderOptions readerFactoryOptions)
            : base(stream, readerFactoryOptions)
        {
        }
    }
}

namespace SharpCompress.Common.Tar.Headers
{
    /// <summary>Tar entry type flag (header byte 156); values are the on-disk ASCII codes.</summary>
    internal enum EntryType : byte
    {
        File = 0,
        OldFile = (byte)'0',
        HardLink = (byte)'1',
        SymLink = (byte)'2',
        CharDevice = (byte)'3',
        BlockDevice = (byte)'4',
        Directory = (byte)'5',
        Fifo = (byte)'6',
        LongLink = (byte)'K',
        LongName = (byte)'L',
        SparseFile = (byte)'S',
        VolumeHeader = (byte)'V',
        GlobalExtendedHeader = (byte)'g'
    }
}
using System;
using System.IO;
using System.Text;
using SharpCompress.Converters;

namespace SharpCompress.Common.Tar.Headers
{
    /// <summary>
    /// Reads and writes a single 512-byte tar header block, including GNU
    /// "LongLink" long-filename records and base-256 (binary) size fields.
    /// </summary>
    internal class TarHeader
    {
        internal static readonly DateTime EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);

        public TarHeader(ArchiveEncoding archiveEncoding)
        {
            ArchiveEncoding = archiveEncoding;
        }

        internal string Name { get; set; }
        internal string LinkName { get; set; }

        //internal int Mode { get; set; }
        //internal int UserId { get; set; }
        //internal string UserName { get; set; }
        //internal int GroupId { get; set; }
        //internal string GroupName { get; set; }
        internal long Size { get; set; }
        internal DateTime LastModifiedTime { get; set; }
        internal EntryType EntryType { get; set; }
        internal Stream PackedStream { get; set; }
        internal ArchiveEncoding ArchiveEncoding { get; }

        internal const int BLOCK_SIZE = 512;

        internal void Write(Stream output)
        {
            byte[] buffer = new byte[BLOCK_SIZE];

            WriteOctalBytes(511, buffer, 100, 8); // file mode
            WriteOctalBytes(0, buffer, 108, 8); // owner ID
            WriteOctalBytes(0, buffer, 116, 8); // group ID

            if (Name.Length > 100)
            {
                // Emit a GNU "././@LongLink" pseudo-entry whose data block carries
                // the real (long) name; the actual header follows afterwards.
                WriteStringBytes("././@LongLink", buffer, 0, 100);
                buffer[156] = (byte)EntryType.LongName;
                WriteOctalBytes(Name.Length + 1, buffer, 124, 12);
            }
            else
            {
                WriteStringBytes(Name, buffer, 0, 100);
                WriteOctalBytes(Size, buffer, 124, 12);
                var time = (long)(LastModifiedTime.ToUniversalTime() - EPOCH).TotalSeconds;
                WriteOctalBytes(time, buffer, 136, 12);
                buffer[156] = (byte)EntryType;

                if (Size >= 0x1FFFFFFFF)
                {
                    // Size does not fit in 11 octal digits: use the base-256 form
                    // (high bit of the field's first byte set).
                    byte[] bytes = DataConverter.BigEndian.GetBytes(Size);
                    var bytes12 = new byte[12];
                    bytes.CopyTo(bytes12, 12 - bytes.Length);
                    bytes12[0] |= 0x80;
                    bytes12.CopyTo(buffer, 124);
                }
            }

            int crc = RecalculateChecksum(buffer);
            WriteOctalBytes(crc, buffer, 148, 8);

            output.Write(buffer, 0, buffer.Length);

            if (Name.Length > 100)
            {
                WriteLongFilenameHeader(output);
                // Truncate so the follow-up header fits the 100-byte name field.
                Name = Name.Substring(0, 100);
                Write(output);
            }
        }

        private void WriteLongFilenameHeader(Stream output)
        {
            byte[] nameBytes = ArchiveEncoding.Encode(Name);
            output.Write(nameBytes, 0, nameBytes.Length);

            // Pad to a multiple of BLOCK_SIZE and guarantee a terminating null.
            int numPaddingBytes = BLOCK_SIZE - (nameBytes.Length % BLOCK_SIZE);
            if (numPaddingBytes == 0)
            {
                numPaddingBytes = BLOCK_SIZE;
            }
            output.Write(new byte[numPaddingBytes], 0, numPaddingBytes);
        }

        internal bool Read(BinaryReader reader)
        {
            var buffer = ReadBlock(reader);
            if (buffer.Length == 0)
            {
                return false; // end of archive
            }

            // For symlinks, additionally capture the link target.
            if (ReadEntryType(buffer) == EntryType.SymLink)
            {
                LinkName = ArchiveEncoding.Decode(buffer, 157, 100).TrimNulls();
            }

            if (ReadEntryType(buffer) == EntryType.LongName)
            {
                // GNU long-name record: the real name is in the data block(s)
                // that follow, then the actual header block.
                Name = ReadLongName(reader, buffer);
                buffer = ReadBlock(reader);
            }
            else
            {
                Name = ArchiveEncoding.Decode(buffer, 0, 100).TrimNulls();
            }

            EntryType = ReadEntryType(buffer);
            Size = ReadSize(buffer);

            //Mode = ReadASCIIInt32Base8(buffer, 100, 7);
            //UserId = ReadASCIIInt32Base8(buffer, 108, 7);
            //GroupId = ReadASCIIInt32Base8(buffer, 116, 7);
            long unixTimeStamp = ReadAsciiInt64Base8(buffer, 136, 11);
            LastModifiedTime = EPOCH.AddSeconds(unixTimeStamp).ToLocalTime();

            Magic = ArchiveEncoding.Decode(buffer, 257, 6).TrimNulls();

            if (!string.IsNullOrEmpty(Magic) && "ustar".Equals(Magic))
            {
                // ustar stores an optional name prefix at offset 345.
                string namePrefix = ArchiveEncoding.Decode(buffer, 345, 157);
                namePrefix = namePrefix.TrimNulls();
                if (!string.IsNullOrEmpty(namePrefix))
                {
                    Name = namePrefix + "/" + Name;
                }
            }

            if (EntryType != EntryType.LongName && Name.Length == 0)
            {
                return false;
            }
            return true;
        }

        private string ReadLongName(BinaryReader reader, byte[] buffer)
        {
            var nameLength = (int)ReadSize(buffer);
            var nameBytes = reader.ReadBytes(nameLength);

            // Consume (and discard) the remainder of the partially used block.
            var remainingBytesToRead = BLOCK_SIZE - (nameLength % BLOCK_SIZE);
            if (remainingBytesToRead < BLOCK_SIZE)
            {
                reader.ReadBytes(remainingBytesToRead);
            }
            return ArchiveEncoding.Decode(nameBytes, 0, nameBytes.Length).TrimNulls();
        }

        private static EntryType ReadEntryType(byte[] buffer) => (EntryType)buffer[156];

        private long ReadSize(byte[] buffer)
        {
            if ((buffer[124] & 0x80) == 0x80)
            {
                // Base-256 (binary) size: read the low 8 bytes of the 12-byte
                // field (offsets 128..135).
                return DataConverter.BigEndian.GetInt64(buffer, 0x80);
            }
            return ReadAsciiInt64Base8(buffer, 124, 11);
        }

        private static byte[] ReadBlock(BinaryReader reader)
        {
            byte[] buffer = reader.ReadBytes(BLOCK_SIZE);

            if (buffer.Length != 0 && buffer.Length < BLOCK_SIZE)
            {
                throw new InvalidOperationException("Buffer is invalid size");
            }
            return buffer;
        }

        private static void WriteStringBytes(string name, byte[] buffer, int offset, int length)
        {
            int i;

            for (i = 0; i < length && i < name.Length; ++i)
            {
                buffer[offset + i] = (byte)name[i];
            }

            // Null-fill the rest of the field.
            for (; i < length; ++i)
            {
                buffer[offset + i] = 0;
            }
        }

        private static void WriteOctalBytes(long value, byte[] buffer, int offset, int length)
        {
            // Right-align the octal digits; lead with spaces.
            string val = Convert.ToString(value, 8);
            int shift = length - val.Length - 1;
            for (int i = 0; i < shift; i++)
            {
                buffer[offset + i] = (byte)' ';
            }
            for (int i = 0; i < val.Length; i++)
            {
                buffer[offset + i + shift] = (byte)val[i];
            }
        }

        private static int ReadAsciiInt32Base8(byte[] buffer, int offset, int count)
        {
            string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
            if (string.IsNullOrEmpty(s))
            {
                return 0;
            }
            return Convert.ToInt32(s, 8);
        }

        private static long ReadAsciiInt64Base8(byte[] buffer, int offset, int count)
        {
            string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
            if (string.IsNullOrEmpty(s))
            {
                return 0;
            }
            return Convert.ToInt64(s, 8);
        }

        private static long ReadAsciiInt64(byte[] buffer, int offset, int count)
        {
            string s = Encoding.UTF8.GetString(buffer, offset, count).TrimNulls();
            if (string.IsNullOrEmpty(s))
            {
                return 0;
            }
            return Convert.ToInt64(s);
        }

        internal static int RecalculateChecksum(byte[] buf)
        {
            // The checksum field itself counts as 8 spaces.
            Encoding.UTF8.GetBytes("        ").CopyTo(buf, 148);

            int headerChecksum = 0;
            foreach (byte b in buf)
            {
                headerChecksum += b;
            }
            return headerChecksum;
        }

        internal static int RecalculateAltChecksum(byte[] buf)
        {
            // Legacy variant: bytes with the high bit set are treated as signed.
            Encoding.UTF8.GetBytes("        ").CopyTo(buf, 148);
            int headerChecksum = 0;
            foreach (byte b in buf)
            {
                if ((b & 0x80) == 0x80)
                {
                    headerChecksum -= b ^ 0x80;
                }
                else
                {
                    headerChecksum += b;
                }
            }
            return headerChecksum;
        }

        public long? DataStartPosition { get; set; }

        public string Magic { get; set; }
    }
}
override DateTime? LastModifiedTime => _filePart.Header.LastModifiedTime; + + public override DateTime? CreatedTime => null; + + public override DateTime? LastAccessedTime => null; + + public override DateTime? ArchivedTime => null; + + public override bool IsEncrypted => false; + + public override bool IsDirectory => _filePart.Header.EntryType == EntryType.Directory; + + public override bool IsSplitAfter => false; + + internal override IEnumerable Parts => _filePart.AsEnumerable(); + + internal static IEnumerable GetEntries(StreamingMode mode, Stream stream, + CompressionType compressionType, ArchiveEncoding archiveEncoding) + { + foreach (TarHeader h in TarHeaderFactory.ReadHeader(mode, stream, archiveEncoding)) + { + if (h != null) + { + if (mode == StreamingMode.Seekable) + { + yield return new TarEntry(new TarFilePart(h, stream), compressionType); + } + else + { + yield return new TarEntry(new TarFilePart(h, null), compressionType); + } + } + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Tar/TarFilePart.cs b/BizHawk.Client.Common/SharpCompress/Common/Tar/TarFilePart.cs new file mode 100644 index 0000000000..dafbb86758 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Tar/TarFilePart.cs @@ -0,0 +1,37 @@ +using System.IO; +using SharpCompress.Common.Tar.Headers; +using SharpCompress.IO; + +namespace SharpCompress.Common.Tar +{ + internal class TarFilePart : FilePart + { + private readonly Stream _seekableStream; + + internal TarFilePart(TarHeader header, Stream seekableStream) + : base(header.ArchiveEncoding) + { + _seekableStream = seekableStream; + Header = header; + } + + internal TarHeader Header { get; } + + internal override string FilePartName => Header.Name; + + internal override Stream GetCompressedStream() + { + if (_seekableStream != null) + { + _seekableStream.Position = Header.DataStartPosition.Value; + return new ReadOnlySubStream(_seekableStream, Header.Size); + } + return 
Header.PackedStream; + } + + internal override Stream GetRawStream() + { + return null; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Tar/TarHeaderFactory.cs b/BizHawk.Client.Common/SharpCompress/Common/Tar/TarHeaderFactory.cs new file mode 100644 index 0000000000..4671f0944b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Tar/TarHeaderFactory.cs @@ -0,0 +1,64 @@ +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common.Tar.Headers; +using SharpCompress.IO; +using System.Text; + +namespace SharpCompress.Common.Tar +{ + internal static class TarHeaderFactory + { + internal static IEnumerable ReadHeader(StreamingMode mode, Stream stream, ArchiveEncoding archiveEncoding) + { + while (true) + { + TarHeader header = null; + try + { + BinaryReader reader = new BinaryReader(stream); + header = new TarHeader(archiveEncoding); + + if (!header.Read(reader)) + { + yield break; + } + switch (mode) + { + case StreamingMode.Seekable: + { + header.DataStartPosition = reader.BaseStream.Position; + + //skip to nearest 512 + reader.BaseStream.Position += PadTo512(header.Size); + } + break; + case StreamingMode.Streaming: + { + header.PackedStream = new TarReadOnlySubStream(stream, header.Size); + } + break; + default: + { + throw new InvalidFormatException("Invalid StreamingMode"); + } + } + } + catch + { + header = null; + } + yield return header; + } + } + + private static long PadTo512(long size) + { + int zeros = (int)(size % 512); + if (zeros == 0) + { + return size; + } + return 512 - zeros + size; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Tar/TarReadOnlySubStream.cs b/BizHawk.Client.Common/SharpCompress/Common/Tar/TarReadOnlySubStream.cs new file mode 100644 index 0000000000..ed808d4282 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Tar/TarReadOnlySubStream.cs @@ -0,0 +1,105 @@ +using SharpCompress.IO; +using System; 
+using System.IO; + +namespace SharpCompress.Common.Tar +{ + internal class TarReadOnlySubStream : NonDisposingStream + { + private bool _isDisposed; + private long _amountRead; + + public TarReadOnlySubStream(Stream stream, long bytesToRead) : base(stream, throwOnDispose: false) + { + BytesLeftToRead = bytesToRead; + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + if (disposing) + { + long skipBytes = _amountRead % 512; + if (skipBytes == 0) + { + return; + } + skipBytes = 512 - skipBytes; + if (skipBytes == 0) + { + return; + } + var buffer = new byte[skipBytes]; + Stream.ReadFully(buffer); + } + base.Dispose(disposing); + } + + private long BytesLeftToRead { get; set; } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + if (BytesLeftToRead < count) + { + count = (int)BytesLeftToRead; + } + int read = Stream.Read(buffer, offset, count); + if (read > 0) + { + BytesLeftToRead -= read; + _amountRead += read; + } + return read; + } + + public override int ReadByte() + { + if (BytesLeftToRead <= 0) + { + return -1; + } + int value = Stream.ReadByte(); + if (value != -1) + { + --BytesLeftToRead; + ++_amountRead; + } + return value; + + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff 
using System;
using System.IO;
using SharpCompress.IO;
using SharpCompress.Readers;

namespace SharpCompress.Common.Tar
{
    /// <summary>Single-file volume wrapper for tar archives.</summary>
    public class TarVolume : Volume
    {
        public TarVolume(Stream stream, ReaderOptions readerOptions)
            : base(stream, readerOptions)
        {
        }
    }
}

namespace SharpCompress.Common
{
    /// <summary>
    /// Base class owning the stream of one archive volume; optionally shields a
    /// caller-owned stream from disposal.
    /// </summary>
    public abstract class Volume : IVolume
    {
        private readonly Stream _actualStream;

        internal Volume(Stream stream, ReaderOptions readerOptions)
        {
            ReaderOptions = readerOptions;
            if (readerOptions.LeaveStreamOpen)
            {
                // Wrap so our Dispose cannot close the caller's stream.
                stream = new NonDisposingStream(stream);
            }
            _actualStream = stream;
        }

        internal Stream Stream => _actualStream;

        protected ReaderOptions ReaderOptions { get; }

        /// <summary>
        /// RarArchive is the first volume of a multi-part archive.
        /// Only Rar 3.0 format and higher.
        /// </summary>
        public virtual bool IsFirstVolume => true;

        /// <summary>
        /// RarArchive is part of a multi-part archive.
        /// </summary>
        public virtual bool IsMultiVolume => true;

        protected virtual void Dispose(bool disposing)
        {
            if (disposing)
            {
                _actualStream.Dispose();
            }
        }

        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }
    }
}

namespace SharpCompress.Common.Zip.Headers
{
    /// <summary>End-of-central-directory (EOCD) record of a zip archive.</summary>
    internal class DirectoryEndHeader : ZipHeader
    {
        public DirectoryEndHeader()
            : base(ZipHeaderType.DirectoryEnd)
        {
        }

        internal override void Read(BinaryReader reader)
        {
            VolumeNumber = reader.ReadUInt16();
            FirstVolumeWithDirectory = reader.ReadUInt16();
            TotalNumberOfEntriesInDisk = reader.ReadUInt16();
            TotalNumberOfEntries = reader.ReadUInt16();
            DirectorySize = reader.ReadUInt32();
            DirectoryStartOffsetRelativeToDisk = reader.ReadUInt32();
            CommentLength = reader.ReadUInt16();
            Comment = reader.ReadBytes(CommentLength);
        }

        public ushort VolumeNumber { get; private set; }

        public ushort FirstVolumeWithDirectory { get; private set; }

        public ushort TotalNumberOfEntriesInDisk { get; private set; }

        public uint DirectorySize { get; private set; }

        public uint DirectoryStartOffsetRelativeToDisk { get; private set; }

        public ushort CommentLength { get; private set; }

        public byte[] Comment { get; private set; }

        public ushort TotalNumberOfEntries { get; private set; }

        // Saturated 16/32-bit fields mean the real values live in the zip64
        // end-of-central-directory record.
        public bool IsZip64 => TotalNumberOfEntriesInDisk == ushort.MaxValue
                               || DirectorySize == uint.MaxValue
                               || DirectoryStartOffsetRelativeToDisk == uint.MaxValue;
    }
}
a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs new file mode 100644 index 0000000000..6a2dd04ac2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs @@ -0,0 +1,98 @@ +using System.IO; +using System.Linq; +using System.Text; + +namespace SharpCompress.Common.Zip.Headers +{ + internal class DirectoryEntryHeader : ZipFileEntry + { + public DirectoryEntryHeader(ArchiveEncoding archiveEncoding) + : base(ZipHeaderType.DirectoryEntry, archiveEncoding) + { + } + + internal override void Read(BinaryReader reader) + { + Version = reader.ReadUInt16(); + VersionNeededToExtract = reader.ReadUInt16(); + Flags = (HeaderFlags)reader.ReadUInt16(); + CompressionMethod = (ZipCompressionMethod)reader.ReadUInt16(); + LastModifiedTime = reader.ReadUInt16(); + LastModifiedDate = reader.ReadUInt16(); + Crc = reader.ReadUInt32(); + CompressedSize = reader.ReadUInt32(); + UncompressedSize = reader.ReadUInt32(); + ushort nameLength = reader.ReadUInt16(); + ushort extraLength = reader.ReadUInt16(); + ushort commentLength = reader.ReadUInt16(); + DiskNumberStart = reader.ReadUInt16(); + InternalFileAttributes = reader.ReadUInt16(); + ExternalFileAttributes = reader.ReadUInt32(); + RelativeOffsetOfEntryHeader = reader.ReadUInt32(); + + byte[] name = reader.ReadBytes(nameLength); + byte[] extra = reader.ReadBytes(extraLength); + byte[] comment = reader.ReadBytes(commentLength); + + // According to .ZIP File Format Specification + // + // For example: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT + // + // Bit 11: Language encoding flag (EFS). If this bit is set, + // the filename and comment fields for this file + // MUST be encoded using UTF-8. 
(see APPENDIX D) + + if (Flags.HasFlag(HeaderFlags.Efs)) + { + Name = ArchiveEncoding.DecodeUTF8(name); + Comment = ArchiveEncoding.DecodeUTF8(comment); + } + else + { + Name = ArchiveEncoding.Decode(name); + Comment = ArchiveEncoding.Decode(comment); + } + + LoadExtra(extra); + + var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField); + if (unicodePathExtra != null) + { + Name = ((ExtraUnicodePathExtraField)unicodePathExtra).UnicodeName; + } + + var zip64ExtraData = Extra.OfType().FirstOrDefault(); + if (zip64ExtraData != null) + { + if (CompressedSize == uint.MaxValue) + { + CompressedSize = zip64ExtraData.CompressedSize; + } + + if (UncompressedSize == uint.MaxValue) + { + UncompressedSize = zip64ExtraData.UncompressedSize; + } + + if (RelativeOffsetOfEntryHeader == uint.MaxValue) + { + RelativeOffsetOfEntryHeader = zip64ExtraData.RelativeOffsetOfEntryHeader; + } + } + } + + internal ushort Version { get; private set; } + + public ushort VersionNeededToExtract { get; set; } + + public long RelativeOffsetOfEntryHeader { get; set; } + + public uint ExternalFileAttributes { get; set; } + + public ushort InternalFileAttributes { get; set; } + + public ushort DiskNumberStart { get; set; } + + public string Comment { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/HeaderFlags.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/HeaderFlags.cs new file mode 100644 index 0000000000..92aaaca73f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/HeaderFlags.cs @@ -0,0 +1,17 @@ +using System; + +namespace SharpCompress.Common.Zip.Headers +{ + [Flags] + internal enum HeaderFlags : ushort + { + None = 0, + Encrypted = 1, // http://www.pkware.com/documents/casestudies/APPNOTE.TXT + Bit1 = 2, + Bit2 = 4, + UsePostDataDescriptor = 8, + EnhancedDeflate = 16, + //Bit 11: Language encoding flag + Efs = 2048 + } +} \ No newline at end of 
file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs new file mode 100644 index 0000000000..100b8f5eda --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs @@ -0,0 +1,17 @@ +using System; +using System.IO; + +namespace SharpCompress.Common.Zip.Headers +{ + internal class IgnoreHeader : ZipHeader + { + public IgnoreHeader(ZipHeaderType type) + : base(type) + { + } + + internal override void Read(BinaryReader reader) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs new file mode 100644 index 0000000000..3bc4375224 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs @@ -0,0 +1,70 @@ +using System.IO; +using System.Linq; +using System.Text; + +namespace SharpCompress.Common.Zip.Headers +{ + internal class LocalEntryHeader : ZipFileEntry + { + public LocalEntryHeader(ArchiveEncoding archiveEncoding) + : base(ZipHeaderType.LocalEntry, archiveEncoding) + { + } + + internal override void Read(BinaryReader reader) + { + Version = reader.ReadUInt16(); + Flags = (HeaderFlags)reader.ReadUInt16(); + CompressionMethod = (ZipCompressionMethod)reader.ReadUInt16(); + LastModifiedTime = reader.ReadUInt16(); + LastModifiedDate = reader.ReadUInt16(); + Crc = reader.ReadUInt32(); + CompressedSize = reader.ReadUInt32(); + UncompressedSize = reader.ReadUInt32(); + ushort nameLength = reader.ReadUInt16(); + ushort extraLength = reader.ReadUInt16(); + byte[] name = reader.ReadBytes(nameLength); + byte[] extra = reader.ReadBytes(extraLength); + + // According to .ZIP File Format Specification + // + // For example: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT + // + // Bit 11: Language encoding flag (EFS). 
If this bit is set, + // the filename and comment fields for this file + // MUST be encoded using UTF-8. (see APPENDIX D) + + if (Flags.HasFlag(HeaderFlags.Efs)) + { + Name = ArchiveEncoding.DecodeUTF8(name); + } + else + { + Name = ArchiveEncoding.Decode(name); + } + + LoadExtra(extra); + + var unicodePathExtra = Extra.FirstOrDefault(u => u.Type == ExtraDataType.UnicodePathExtraField); + if (unicodePathExtra != null) + { + Name = ((ExtraUnicodePathExtraField)unicodePathExtra).UnicodeName; + } + + var zip64ExtraData = Extra.OfType().FirstOrDefault(); + if (zip64ExtraData != null) + { + if (CompressedSize == uint.MaxValue) + { + CompressedSize = zip64ExtraData.CompressedSize; + } + if (UncompressedSize == uint.MaxValue) + { + UncompressedSize = zip64ExtraData.UncompressedSize; + } + } + } + + internal ushort Version { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeaderExtraFactory.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeaderExtraFactory.cs new file mode 100644 index 0000000000..b86d505232 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/LocalEntryHeaderExtraFactory.cs @@ -0,0 +1,149 @@ +using System; +using System.Text; +using SharpCompress.Converters; + +namespace SharpCompress.Common.Zip.Headers +{ + internal enum ExtraDataType : ushort + { + WinZipAes = 0x9901, + + NotImplementedExtraData = 0xFFFF, + + // Third Party Mappings + // -Info-ZIP Unicode Path Extra Field + UnicodePathExtraField = 0x7075, + Zip64ExtendedInformationExtraField = 0x0001 + } + + internal class ExtraData + { + internal ExtraDataType Type { get; set; } + internal ushort Length { get; set; } + internal byte[] DataBytes { get; set; } + } + + internal class ExtraUnicodePathExtraField : ExtraData + { + internal byte Version => DataBytes[0]; + + internal byte[] NameCrc32 + { + get + { + var crc = new byte[4]; + Buffer.BlockCopy(DataBytes, 1, crc, 0, 4); 
+ return crc; + } + } + + internal string UnicodeName + { + get + { + // PathNamelength = dataLength - Version(1 byte) - NameCRC32(4 bytes) + var length = Length - 5; + var nameStr = Encoding.UTF8.GetString(DataBytes, 5, length); + return nameStr; + } + } + } + + internal class Zip64ExtendedInformationExtraField : ExtraData + { + + public Zip64ExtendedInformationExtraField(ExtraDataType type, ushort length, byte[] dataBytes) + { + Type = type; + Length = length; + DataBytes = dataBytes; + Process(); + } + + //From the spec values are only in the extradata if the standard + //value is set to 0xFFFF, but if one of the sizes are present, both are. + //Hence if length == 4 volume only + // if length == 8 offset only + // if length == 12 offset + volume + // if length == 16 sizes only + // if length == 20 sizes + volume + // if length == 24 sizes + offset + // if length == 28 everything. + //It is unclear how many of these are used in the wild. + + private void Process() + { + switch (DataBytes.Length) + { + case 4: + VolumeNumber = DataConverter.LittleEndian.GetUInt32(DataBytes, 0); + return; + case 8: + RelativeOffsetOfEntryHeader = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 0); + return; + case 12: + RelativeOffsetOfEntryHeader = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 0); + VolumeNumber = DataConverter.LittleEndian.GetUInt32(DataBytes, 8); + return; + case 16: + UncompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 0); + CompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 8); + return; + case 20: + UncompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 0); + CompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 8); + VolumeNumber = DataConverter.LittleEndian.GetUInt32(DataBytes, 16); + return; + case 24: + UncompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 0); + CompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 8); + 
RelativeOffsetOfEntryHeader = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 16); + return; + case 28: + UncompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 0); + CompressedSize = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 8); + RelativeOffsetOfEntryHeader = (long)DataConverter.LittleEndian.GetUInt64(DataBytes, 16); + VolumeNumber = DataConverter.LittleEndian.GetUInt32(DataBytes, 24); + return; + default: + throw new ArchiveException("Unexpected size of of Zip64 extended information extra field"); + } + } + + public long UncompressedSize { get; private set; } + public long CompressedSize { get; private set; } + public long RelativeOffsetOfEntryHeader { get; private set; } + public uint VolumeNumber { get; private set; } + } + + internal static class LocalEntryHeaderExtraFactory + { + internal static ExtraData Create(ExtraDataType type, ushort length, byte[] extraData) + { + switch (type) + { + case ExtraDataType.UnicodePathExtraField: + return new ExtraUnicodePathExtraField + { + Type = type, + Length = length, + DataBytes = extraData + }; + case ExtraDataType.Zip64ExtendedInformationExtraField: + return new Zip64ExtendedInformationExtraField + ( + type, + length, + extraData + ); + default: + return new ExtraData + { + Type = type, + Length = length, + DataBytes = extraData + }; + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/SplitHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/SplitHeader.cs new file mode 100644 index 0000000000..ee7ca0b863 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/SplitHeader.cs @@ -0,0 +1,18 @@ +using System; +using System.IO; + +namespace SharpCompress.Common.Zip.Headers +{ + internal class SplitHeader : ZipHeader + { + public SplitHeader() + : base(ZipHeaderType.Split) + { + } + + internal override void Read(BinaryReader reader) + { + throw new NotImplementedException(); + } + } +} \ No 
newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs new file mode 100644 index 0000000000..443cbd8a07 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs @@ -0,0 +1,49 @@ +using System; +using System.IO; + +namespace SharpCompress.Common.Zip.Headers +{ + internal class Zip64DirectoryEndHeader : ZipHeader + { + public Zip64DirectoryEndHeader() + : base(ZipHeaderType.Zip64DirectoryEnd) + { + } + + internal override void Read(BinaryReader reader) + { + SizeOfDirectoryEndRecord = (long)reader.ReadUInt64(); + VersionMadeBy = reader.ReadUInt16(); + VersionNeededToExtract = reader.ReadUInt16(); + VolumeNumber = reader.ReadUInt32(); + FirstVolumeWithDirectory = reader.ReadUInt32(); + TotalNumberOfEntriesInDisk = (long)reader.ReadUInt64(); + TotalNumberOfEntries = (long)reader.ReadUInt64(); + DirectorySize = (long)reader.ReadUInt64(); + DirectoryStartOffsetRelativeToDisk = (long)reader.ReadUInt64(); + DataSector = reader.ReadBytes((int)(SizeOfDirectoryEndRecord - SIZE_OF_FIXED_HEADER_DATA_EXCEPT_SIGNATURE_AND_SIZE_FIELDS)); + } + + private const int SIZE_OF_FIXED_HEADER_DATA_EXCEPT_SIGNATURE_AND_SIZE_FIELDS = 44; + + public long SizeOfDirectoryEndRecord { get; private set; } + + public ushort VersionMadeBy { get; private set; } + + public ushort VersionNeededToExtract { get; private set; } + + public uint VolumeNumber { get; private set; } + + public uint FirstVolumeWithDirectory { get; private set; } + + public long TotalNumberOfEntriesInDisk { get; private set; } + + public long TotalNumberOfEntries { get; private set; } + + public long DirectorySize { get; private set; } + + public long DirectoryStartOffsetRelativeToDisk { get; private set; } + + public byte[] DataSector { get; private set; } + } +} \ No newline at end of file diff --git 
a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndLocatorHeader.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndLocatorHeader.cs new file mode 100644 index 0000000000..82a6562e3b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndLocatorHeader.cs @@ -0,0 +1,25 @@ +using System.IO; + +namespace SharpCompress.Common.Zip.Headers +{ + internal class Zip64DirectoryEndLocatorHeader : ZipHeader + { + public Zip64DirectoryEndLocatorHeader() + : base(ZipHeaderType.Zip64DirectoryEndLocator) + { + } + + internal override void Read(BinaryReader reader) + { + FirstVolumeWithDirectory = reader.ReadUInt32(); + RelativeOffsetOfTheEndOfDirectoryRecord = (long)reader.ReadUInt64(); + TotalNumberOfVolumes = reader.ReadUInt32(); + } + + public uint FirstVolumeWithDirectory { get; private set; } + + public long RelativeOffsetOfTheEndOfDirectoryRecord { get; private set; } + + public uint TotalNumberOfVolumes { get; private set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipFileEntry.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipFileEntry.cs new file mode 100644 index 0000000000..8a6449c96c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipFileEntry.cs @@ -0,0 +1,102 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; +using SharpCompress.Converters; + +namespace SharpCompress.Common.Zip.Headers +{ + internal abstract class ZipFileEntry : ZipHeader + { + protected ZipFileEntry(ZipHeaderType type, ArchiveEncoding archiveEncoding) + : base(type) + { + Extra = new List(); + ArchiveEncoding = archiveEncoding; + } + + internal bool IsDirectory + { + get + { + if (Name.EndsWith("/")) + { + return true; + } + + //.NET Framework 4.5 : System.IO.Compression::CreateFromDirectory() probably writes backslashes to headers + return CompressedSize == 0 + && 
UncompressedSize == 0 + && Name.EndsWith("\\"); + } + } + + internal Stream PackedStream { get; set; } + + internal ArchiveEncoding ArchiveEncoding { get; } + + internal string Name { get; set; } + + internal HeaderFlags Flags { get; set; } + + internal ZipCompressionMethod CompressionMethod { get; set; } + + internal long CompressedSize { get; set; } + + internal long? DataStartPosition { get; set; } + + internal long UncompressedSize { get; set; } + + internal List Extra { get; set; } + + public string Password { get; set; } + + internal PkwareTraditionalEncryptionData ComposeEncryptionData(Stream archiveStream) + { + if (archiveStream == null) + { + throw new ArgumentNullException(nameof(archiveStream)); + } + + var buffer = new byte[12]; + archiveStream.ReadFully(buffer); + + PkwareTraditionalEncryptionData encryptionData = PkwareTraditionalEncryptionData.ForRead(Password, this, buffer); + + return encryptionData; + } + +#if !NO_CRYPTO + internal WinzipAesEncryptionData WinzipAesEncryptionData { get; set; } +#endif + + internal ushort LastModifiedDate { get; set; } + + internal ushort LastModifiedTime { get; set; } + + internal uint Crc { get; set; } + + protected void LoadExtra(byte[] extra) + { + for (int i = 0; i < extra.Length - 4;) + { + ExtraDataType type = (ExtraDataType)DataConverter.LittleEndian.GetUInt16(extra, i); + if (!Enum.IsDefined(typeof(ExtraDataType), type)) + { + type = ExtraDataType.NotImplementedExtraData; + } + + ushort length = DataConverter.LittleEndian.GetUInt16(extra, i + 2); + byte[] data = new byte[length]; + Buffer.BlockCopy(extra, i + 4, data, 0, length); + Extra.Add(LocalEntryHeaderExtraFactory.Create(type, length, data)); + + i += length + 4; + } + } + + internal ZipFilePart Part { get; set; } + + internal bool IsZip64 => CompressedSize == uint.MaxValue; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeader.cs 
b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeader.cs new file mode 100644 index 0000000000..d62513daf7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeader.cs @@ -0,0 +1,19 @@ +using System.IO; + +namespace SharpCompress.Common.Zip.Headers +{ + internal abstract class ZipHeader + { + protected ZipHeader(ZipHeaderType type) + { + ZipHeaderType = type; + HasData = true; + } + + internal ZipHeaderType ZipHeaderType { get; } + + internal abstract void Read(BinaryReader reader); + + internal bool HasData { get; set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeaderType.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeaderType.cs new file mode 100644 index 0000000000..a4286dedc5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/Headers/ZipHeaderType.cs @@ -0,0 +1,13 @@ +namespace SharpCompress.Common.Zip.Headers +{ + internal enum ZipHeaderType + { + Ignore, + LocalEntry, + DirectoryEntry, + DirectoryEnd, + Split, + Zip64DirectoryEnd, + Zip64DirectoryEndLocator + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalCryptoStream.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalCryptoStream.cs new file mode 100644 index 0000000000..2c3f1400c8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalCryptoStream.cs @@ -0,0 +1,108 @@ +using System; +using System.IO; + +namespace SharpCompress.Common.Zip +{ + internal enum CryptoMode + { + Encrypt, + Decrypt + } + + internal class PkwareTraditionalCryptoStream : Stream + { + private readonly PkwareTraditionalEncryptionData _encryptor; + private readonly CryptoMode _mode; + private readonly Stream _stream; + private bool _isDisposed; + + public PkwareTraditionalCryptoStream(Stream stream, PkwareTraditionalEncryptionData encryptor, CryptoMode mode) + { + this._encryptor = 
encryptor; + this._stream = stream; + this._mode = mode; + } + + public override bool CanRead => (_mode == CryptoMode.Decrypt); + + public override bool CanSeek => false; + + public override bool CanWrite => (_mode == CryptoMode.Encrypt); + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + if (_mode == CryptoMode.Encrypt) + { + throw new NotSupportedException("This stream does not encrypt via Read()"); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + byte[] temp = new byte[count]; + int readBytes = _stream.Read(temp, 0, count); + byte[] decrypted = _encryptor.Decrypt(temp, readBytes); + Buffer.BlockCopy(decrypted, 0, buffer, offset, readBytes); + return readBytes; + } + + public override void Write(byte[] buffer, int offset, int count) + { + if (_mode == CryptoMode.Decrypt) + { + throw new NotSupportedException("This stream does not Decrypt via Write()"); + } + + if (count == 0) + { + return; + } + + byte[] plaintext = null; + if (offset != 0) + { + plaintext = new byte[count]; + Buffer.BlockCopy(buffer, offset, plaintext, 0, count); + } + else + { + plaintext = buffer; + } + + byte[] encrypted = _encryptor.Encrypt(plaintext, count); + _stream.Write(encrypted, 0, encrypted.Length); + } + + public override void Flush() + { + //throw new NotSupportedException(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + base.Dispose(disposing); + _stream.Dispose(); + } + } +} \ No newline at end of file diff --git 
a/BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs new file mode 100644 index 0000000000..2a328003e7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs @@ -0,0 +1,112 @@ +using System; +using System.Text; +using SharpCompress.Common.Zip.Headers; +using SharpCompress.Compressors.Deflate; + +namespace SharpCompress.Common.Zip +{ + internal class PkwareTraditionalEncryptionData + { + private static readonly CRC32 CRC32 = new CRC32(); + private readonly UInt32[] _keys = {0x12345678, 0x23456789, 0x34567890}; + private readonly ArchiveEncoding _archiveEncoding; + + private PkwareTraditionalEncryptionData(string password, ArchiveEncoding archiveEncoding) + { + _archiveEncoding = archiveEncoding; + Initialize(password); + } + + private byte MagicByte + { + get + { + ushort t = (ushort)((ushort)(_keys[2] & 0xFFFF) | 2); + return (byte)((t * (t ^ 1)) >> 8); + } + } + + public static PkwareTraditionalEncryptionData ForRead(string password, ZipFileEntry header, + byte[] encryptionHeader) + { + var encryptor = new PkwareTraditionalEncryptionData(password, header.ArchiveEncoding); + byte[] plainTextHeader = encryptor.Decrypt(encryptionHeader, encryptionHeader.Length); + if (plainTextHeader[11] != (byte)((header.Crc >> 24) & 0xff)) + { + if (!FlagUtility.HasFlag(header.Flags, HeaderFlags.UsePostDataDescriptor)) + { + throw new CryptographicException("The password did not match."); + } + if (plainTextHeader[11] != (byte)((header.LastModifiedTime >> 8) & 0xff)) + { + throw new CryptographicException("The password did not match."); + } + } + return encryptor; + } + + public byte[] Decrypt(byte[] cipherText, int length) + { + if (length > cipherText.Length) + { + throw new ArgumentOutOfRangeException(nameof(length), + "Bad length during Decryption: the length parameter must be smaller than or equal to the size of the 
destination array."); + } + + var plainText = new byte[length]; + for (int i = 0; i < length; i++) + { + var c = (byte)(cipherText[i] ^ MagicByte); + UpdateKeys(c); + plainText[i] = c; + } + return plainText; + } + + public byte[] Encrypt(byte[] plainText, int length) + { + if (plainText == null) + { + throw new ArgumentNullException("plaintext"); + } + + if (length > plainText.Length) + { + throw new ArgumentOutOfRangeException(nameof(length), + "Bad length during Encryption: The length parameter must be smaller than or equal to the size of the destination array."); + } + + var cipherText = new byte[length]; + for (int i = 0; i < length; i++) + { + byte c = plainText[i]; + cipherText[i] = (byte)(plainText[i] ^ MagicByte); + UpdateKeys(c); + } + return cipherText; + } + + private void Initialize(string password) + { + byte[] p = StringToByteArray(password); + for (int i = 0; i < password.Length; i++) + { + UpdateKeys(p[i]); + } + } + + internal byte[] StringToByteArray(string value) + { + byte[] a = _archiveEncoding.Password.GetBytes(value); + return a; + } + + private void UpdateKeys(byte byteValue) + { + _keys[0] = (UInt32)CRC32.ComputeCrc32((int)_keys[0], byteValue); + _keys[1] = _keys[1] + (byte)_keys[0]; + _keys[1] = _keys[1] * 0x08088405 + 1; + _keys[2] = (UInt32)CRC32.ComputeCrc32((int)_keys[2], (byte)(_keys[1] >> 24)); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipFilePart.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipFilePart.cs new file mode 100644 index 0000000000..d06b34aa14 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipFilePart.cs @@ -0,0 +1,42 @@ +using System.IO; +using SharpCompress.Common.Zip.Headers; + +namespace SharpCompress.Common.Zip +{ + internal class SeekableZipFilePart : ZipFilePart + { + private bool _isLocalHeaderLoaded; + private readonly SeekableZipHeaderFactory _headerFactory; + + internal 
SeekableZipFilePart(SeekableZipHeaderFactory headerFactory, DirectoryEntryHeader header, Stream stream) + : base(header, stream) + { + _headerFactory = headerFactory; + } + + internal override Stream GetCompressedStream() + { + if (!_isLocalHeaderLoaded) + { + LoadLocalHeader(); + _isLocalHeaderLoaded = true; + } + return base.GetCompressedStream(); + } + + internal string Comment => (Header as DirectoryEntryHeader).Comment; + + private void LoadLocalHeader() + { + bool hasData = Header.HasData; + Header = _headerFactory.GetLocalHeader(BaseStream, Header as DirectoryEntryHeader); + Header.HasData = hasData; + } + + protected override Stream CreateBaseStream() + { + BaseStream.Position = Header.DataStartPosition.Value; + return BaseStream; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs new file mode 100644 index 0000000000..23f4cbe85c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs @@ -0,0 +1,109 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common.Zip.Headers; +using SharpCompress.IO; +using System.Text; + +namespace SharpCompress.Common.Zip +{ + internal class SeekableZipHeaderFactory : ZipHeaderFactory + { + private const int MAX_ITERATIONS_FOR_DIRECTORY_HEADER = 4096; + private bool _zip64; + + internal SeekableZipHeaderFactory(string password, ArchiveEncoding archiveEncoding) + : base(StreamingMode.Seekable, password, archiveEncoding) + { + } + + internal IEnumerable ReadSeekableHeader(Stream stream) + { + var reader = new BinaryReader(stream); + + SeekBackToHeader(stream, reader, DIRECTORY_END_HEADER_BYTES); + var entry = new DirectoryEndHeader(); + entry.Read(reader); + + if (entry.IsZip64) + { + _zip64 = true; + SeekBackToHeader(stream, reader, ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR); + var zip64Locator = new 
Zip64DirectoryEndLocatorHeader(); + zip64Locator.Read(reader); + + stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin); + uint zip64Signature = reader.ReadUInt32(); + if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY) + throw new ArchiveException("Failed to locate the Zip64 Header"); + + var zip64Entry = new Zip64DirectoryEndHeader(); + zip64Entry.Read(reader); + stream.Seek(zip64Entry.DirectoryStartOffsetRelativeToDisk, SeekOrigin.Begin); + } + else + { + stream.Seek(entry.DirectoryStartOffsetRelativeToDisk, SeekOrigin.Begin); + } + + long position = stream.Position; + while (true) + { + stream.Position = position; + uint signature = reader.ReadUInt32(); + var nextHeader = ReadHeader(signature, reader, _zip64); + position = stream.Position; + + if (nextHeader == null) + yield break; + + if (nextHeader is DirectoryEntryHeader entryHeader) + { + //entry could be zero bytes so we need to know that. + entryHeader.HasData = entryHeader.CompressedSize != 0; + yield return entryHeader; + } + else if (nextHeader is DirectoryEndHeader endHeader) + { + yield return endHeader; + } + } + } + + private static void SeekBackToHeader(Stream stream, BinaryReader reader, uint headerSignature) + { + long offset = 0; + uint signature; + int iterationCount = 0; + do + { + if ((stream.Length + offset) - 4 < 0) + { + throw new ArchiveException("Failed to locate the Zip Header"); + } + stream.Seek(offset - 4, SeekOrigin.End); + signature = reader.ReadUInt32(); + offset--; + iterationCount++; + if (iterationCount > MAX_ITERATIONS_FOR_DIRECTORY_HEADER) + { + throw new ArchiveException("Could not find Zip file Directory at the end of the file. 
File may be corrupted."); + } + } + while (signature != headerSignature); + } + + internal LocalEntryHeader GetLocalHeader(Stream stream, DirectoryEntryHeader directoryEntryHeader) + { + stream.Seek(directoryEntryHeader.RelativeOffsetOfEntryHeader, SeekOrigin.Begin); + BinaryReader reader = new BinaryReader(stream); + uint signature = reader.ReadUInt32(); + var localEntryHeader = ReadHeader(signature, reader, _zip64) as LocalEntryHeader; + if (localEntryHeader == null) + { + throw new InvalidOperationException(); + } + return localEntryHeader; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipFilePart.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipFilePart.cs new file mode 100644 index 0000000000..607154138f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipFilePart.cs @@ -0,0 +1,62 @@ +using System.IO; +using SharpCompress.Common.Zip.Headers; +using SharpCompress.Compressors.Deflate; +using SharpCompress.IO; + +namespace SharpCompress.Common.Zip +{ + internal class StreamingZipFilePart : ZipFilePart + { + private Stream _decompressionStream; + + internal StreamingZipFilePart(ZipFileEntry header, Stream stream) + : base(header, stream) + { + } + + protected override Stream CreateBaseStream() + { + return Header.PackedStream; + } + + internal override Stream GetCompressedStream() + { + if (!Header.HasData) + { + return Stream.Null; + } + _decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()), Header.CompressionMethod); + if (LeaveStreamOpen) + { + return new NonDisposingStream(_decompressionStream); + } + return _decompressionStream; + } + + internal BinaryReader FixStreamedFileLocation(ref RewindableStream rewindableStream) + { + if (Header.IsDirectory) + { + return new BinaryReader(rewindableStream); + } + if (Header.HasData && !Skipped) + { + if (_decompressionStream == null) + { + _decompressionStream = GetCompressedStream(); + 
} + _decompressionStream.Skip(); + + DeflateStream deflateStream = _decompressionStream as DeflateStream; + if (deflateStream != null) + { + rewindableStream.Rewind(deflateStream.InputBuffer); + } + Skipped = true; + } + var reader = new BinaryReader(rewindableStream); + _decompressionStream = null; + return reader; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs new file mode 100644 index 0000000000..f17c5d7e38 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs @@ -0,0 +1,70 @@ +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common.Zip.Headers; +using SharpCompress.IO; +using System.Text; + +namespace SharpCompress.Common.Zip +{ + internal class StreamingZipHeaderFactory : ZipHeaderFactory + { + internal StreamingZipHeaderFactory(string password, ArchiveEncoding archiveEncoding) + : base(StreamingMode.Streaming, password, archiveEncoding) + { + } + + internal IEnumerable ReadStreamHeader(Stream stream) + { + RewindableStream rewindableStream; + if (stream is RewindableStream) + { + rewindableStream = stream as RewindableStream; + } + else + { + rewindableStream = new RewindableStream(stream); + } + while (true) + { + ZipHeader header = null; + BinaryReader reader = new BinaryReader(rewindableStream); + if (_lastEntryHeader != null && + (FlagUtility.HasFlag(_lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor) || _lastEntryHeader.IsZip64)) + { + reader = (_lastEntryHeader.Part as StreamingZipFilePart).FixStreamedFileLocation(ref rewindableStream); + long? pos = rewindableStream.CanSeek ? 
(long?)rewindableStream.Position : null; + uint crc = reader.ReadUInt32(); + if (crc == POST_DATA_DESCRIPTOR) + { + crc = reader.ReadUInt32(); + } + _lastEntryHeader.Crc = crc; + _lastEntryHeader.CompressedSize = reader.ReadUInt32(); + _lastEntryHeader.UncompressedSize = reader.ReadUInt32(); + if (pos.HasValue) + { + _lastEntryHeader.DataStartPosition = pos - _lastEntryHeader.CompressedSize; + } + } + _lastEntryHeader = null; + uint headerBytes = reader.ReadUInt32(); + header = ReadHeader(headerBytes, reader); + if (header == null) { yield break; } + + //entry could be zero bytes so we need to know that. + if (header.ZipHeaderType == ZipHeaderType.LocalEntry) + { + bool isRecording = rewindableStream.IsRecording; + if (!isRecording) + { + rewindableStream.StartRecording(); + } + uint nextHeaderBytes = reader.ReadUInt32(); + header.HasData = !IsHeader(nextHeaderBytes); + rewindableStream.Rewind(!isRecording); + } + yield return header; + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesCryptoStream.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesCryptoStream.cs new file mode 100644 index 0000000000..62e6cff233 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesCryptoStream.cs @@ -0,0 +1,184 @@ + +#if !NO_CRYPTO +using System; +using System.IO; +using System.Security.Cryptography; +using SharpCompress.Converters; + +namespace SharpCompress.Common.Zip +{ + internal class WinzipAesCryptoStream : Stream + { + private const int BLOCK_SIZE_IN_BYTES = 16; + private readonly SymmetricAlgorithm _cipher; + private readonly byte[] _counter = new byte[BLOCK_SIZE_IN_BYTES]; + private readonly Stream _stream; + private readonly ICryptoTransform _transform; + private int _nonce = 1; + private byte[] _counterOut = new byte[BLOCK_SIZE_IN_BYTES]; + private bool _isFinalBlock; + private long _totalBytesLeftToRead; + private bool _isDisposed; + + internal WinzipAesCryptoStream(Stream 
stream, WinzipAesEncryptionData winzipAesEncryptionData, long length) + { + this._stream = stream; + _totalBytesLeftToRead = length; + + _cipher = CreateCipher(winzipAesEncryptionData); + + var iv = new byte[BLOCK_SIZE_IN_BYTES]; + _transform = _cipher.CreateEncryptor(winzipAesEncryptionData.KeyBytes, iv); + } + + private SymmetricAlgorithm CreateCipher(WinzipAesEncryptionData winzipAesEncryptionData) + { + var cipher = Aes.Create(); + cipher.BlockSize = BLOCK_SIZE_IN_BYTES * 8; + cipher.KeySize = winzipAesEncryptionData.KeyBytes.Length * 8; + cipher.Mode = CipherMode.ECB; + cipher.Padding = PaddingMode.None; + return cipher; + } + + public override bool CanRead + { + get { return true; } + } + + public override bool CanSeek + { + get { return false; } + } + + public override bool CanWrite + { + get { return false; } + } + + public override long Length + { + get { throw new NotSupportedException(); } + } + + public override long Position + { + get { throw new NotSupportedException(); } + set { throw new NotSupportedException(); } + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + if (disposing) + { + //read out last 10 auth bytes + var ten = new byte[10]; + _stream.ReadFully(ten); + _stream.Dispose(); + } + } + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override int Read(byte[] buffer, int offset, int count) + { + if (_totalBytesLeftToRead == 0) + { + return 0; + } + int bytesToRead = count; + if (count > _totalBytesLeftToRead) + { + bytesToRead = (int)_totalBytesLeftToRead; + } + int read = _stream.Read(buffer, offset, bytesToRead); + _totalBytesLeftToRead -= read; + + ReadTransformBlocks(buffer, offset, read); + + return read; + } + + private int ReadTransformOneBlock(byte[] buffer, int offset, int last) + { + if (_isFinalBlock) + { + throw new InvalidOperationException(); + } + + int bytesRemaining = last - offset; + int bytesToRead = (bytesRemaining 
> BLOCK_SIZE_IN_BYTES) + ? BLOCK_SIZE_IN_BYTES + : bytesRemaining; + + // update the counter + DataConverter.LittleEndian.PutBytes(_counter, 0, _nonce++); + + // Determine if this is the final block + if ((bytesToRead == bytesRemaining) && (_totalBytesLeftToRead == 0)) + { + _counterOut = _transform.TransformFinalBlock(_counter, + 0, + BLOCK_SIZE_IN_BYTES); + _isFinalBlock = true; + } + else + { + _transform.TransformBlock(_counter, + 0, // offset + BLOCK_SIZE_IN_BYTES, + _counterOut, + 0); // offset + } + + XorInPlace(buffer, offset, bytesToRead); + return bytesToRead; + } + + + private void XorInPlace(byte[] buffer, int offset, int count) + { + for (int i = 0; i < count; i++) + { + buffer[offset + i] = (byte)(_counterOut[i] ^ buffer[offset + i]); + } + } + + private void ReadTransformBlocks(byte[] buffer, int offset, int count) + { + int posn = offset; + int last = count + offset; + + while (posn < buffer.Length && posn < last) + { + int n = ReadTransformOneBlock(buffer, posn, last); + posn += n; + } + } + + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesEncryptionData.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesEncryptionData.cs new file mode 100644 index 0000000000..49f3dd243f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesEncryptionData.cs @@ -0,0 +1,79 @@ + +#if !NO_CRYPTO +using System; +using System.Security.Cryptography; +using SharpCompress.Converters; + +namespace SharpCompress.Common.Zip +{ + internal class WinzipAesEncryptionData + { + private const int RFC2898_ITERATIONS = 1000; + + private readonly byte[] _salt; + private readonly 
WinzipAesKeySize _keySize; + private readonly byte[] _passwordVerifyValue; + private readonly string _password; + + private byte[] _generatedVerifyValue; + + internal WinzipAesEncryptionData(WinzipAesKeySize keySize, byte[] salt, byte[] passwordVerifyValue, + string password) + { + this._keySize = keySize; + this._salt = salt; + this._passwordVerifyValue = passwordVerifyValue; + this._password = password; + Initialize(); + } + + internal byte[] IvBytes +{ + get; set; +} + internal byte[] KeyBytes +{ + get; set; +} + + private int KeySizeInBytes + { + get { return KeyLengthInBytes(_keySize); +} + } + + internal static int KeyLengthInBytes(WinzipAesKeySize keySize) + { + switch (keySize) + { + case WinzipAesKeySize.KeySize128: + return 16; + case WinzipAesKeySize.KeySize192: + return 24; + case WinzipAesKeySize.KeySize256: + return 32; + } + throw new InvalidOperationException(); + } + + private void Initialize() + { + var rfc2898 = new Rfc2898DeriveBytes(_password, _salt, RFC2898_ITERATIONS); + + KeyBytes = rfc2898.GetBytes(KeySizeInBytes); // 16 or 24 or 32 ??? 
+ IvBytes = rfc2898.GetBytes(KeySizeInBytes); + _generatedVerifyValue = rfc2898.GetBytes(2); + + short verify = DataConverter.LittleEndian.GetInt16(_passwordVerifyValue, 0); + if (_password != null) + { + short generated = DataConverter.LittleEndian.GetInt16(_generatedVerifyValue, 0); + if (verify != generated) + { + throw new InvalidFormatException("bad password"); + } + } + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesKeySize.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesKeySize.cs new file mode 100644 index 0000000000..4a39f46d36 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/WinzipAesKeySize.cs @@ -0,0 +1,9 @@ +namespace SharpCompress.Common.Zip +{ + internal enum WinzipAesKeySize + { + KeySize128 = 1, + KeySize192 = 2, + KeySize256 = 3 + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipCompressionMethod.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipCompressionMethod.cs new file mode 100644 index 0000000000..bf7df04098 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipCompressionMethod.cs @@ -0,0 +1,13 @@ +namespace SharpCompress.Common.Zip +{ + internal enum ZipCompressionMethod + { + None = 0, + Deflate = 8, + Deflate64 = 9, + BZip2 = 12, + LZMA = 14, + PPMd = 98, + WinzipAes = 0x63 //http://www.winzip.com/aes_info.htm + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipEntry.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipEntry.cs new file mode 100644 index 0000000000..3f65adaeac --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipEntry.cs @@ -0,0 +1,85 @@ +using System; +using System.Collections.Generic; +using SharpCompress.Common.Zip.Headers; + +namespace SharpCompress.Common.Zip +{ + public class ZipEntry : Entry + { + private readonly ZipFilePart _filePart; + + internal ZipEntry(ZipFilePart filePart) + { + 
if (filePart != null) + { + this._filePart = filePart; + LastModifiedTime = Utility.DosDateToDateTime(filePart.Header.LastModifiedDate, + filePart.Header.LastModifiedTime); + } + } + + public override CompressionType CompressionType + { + get + { + switch (_filePart.Header.CompressionMethod) + { + case ZipCompressionMethod.BZip2: + { + return CompressionType.BZip2; + } + case ZipCompressionMethod.Deflate: + { + return CompressionType.Deflate; + } + case ZipCompressionMethod.Deflate64: + { + return CompressionType.Deflate64; + } + case ZipCompressionMethod.LZMA: + { + return CompressionType.LZMA; + } + case ZipCompressionMethod.PPMd: + { + return CompressionType.PPMd; + } + case ZipCompressionMethod.None: + { + return CompressionType.None; + } + default: + { + return CompressionType.Unknown; + } + } + } + } + + public override long Crc => _filePart.Header.Crc; + + public override string Key => _filePart.Header.Name; + + public override string LinkTarget => null; + + public override long CompressedSize => _filePart.Header.CompressedSize; + + public override long Size => _filePart.Header.UncompressedSize; + + public override DateTime? LastModifiedTime { get; } + + public override DateTime? CreatedTime => null; + + public override DateTime? LastAccessedTime => null; + + public override DateTime? 
ArchivedTime => null; + + public override bool IsEncrypted => FlagUtility.HasFlag(_filePart.Header.Flags, HeaderFlags.Encrypted); + + public override bool IsDirectory => _filePart.Header.IsDirectory; + + public override bool IsSplitAfter => false; + + internal override IEnumerable Parts => _filePart.AsEnumerable(); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipFilePart.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipFilePart.cs new file mode 100644 index 0000000000..f68a1f6fe6 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipFilePart.cs @@ -0,0 +1,187 @@ +using System; +using System.IO; +using System.Linq; +using SharpCompress.Common.Zip.Headers; +using SharpCompress.Compressors; +using SharpCompress.Compressors.BZip2; +using SharpCompress.Compressors.Deflate; +using SharpCompress.Compressors.Deflate64; +using SharpCompress.Compressors.LZMA; +using SharpCompress.Compressors.PPMd; +using SharpCompress.Converters; +using SharpCompress.IO; + +namespace SharpCompress.Common.Zip +{ + internal abstract class ZipFilePart : FilePart + { + internal ZipFilePart(ZipFileEntry header, Stream stream) + : base(header.ArchiveEncoding) + { + Header = header; + header.Part = this; + BaseStream = stream; + } + + internal Stream BaseStream { get; } + internal ZipFileEntry Header { get; set; } + + internal override string FilePartName => Header.Name; + + internal override Stream GetCompressedStream() + { + if (!Header.HasData) + { + return Stream.Null; + } + Stream decompressionStream = CreateDecompressionStream(GetCryptoStream(CreateBaseStream()), Header.CompressionMethod); + if (LeaveStreamOpen) + { + return new NonDisposingStream(decompressionStream); + } + return decompressionStream; + } + + internal override Stream GetRawStream() + { + if (!Header.HasData) + { + return Stream.Null; + } + return CreateBaseStream(); + } + + protected abstract Stream CreateBaseStream(); + + protected bool LeaveStreamOpen 
=> FlagUtility.HasFlag(Header.Flags, HeaderFlags.UsePostDataDescriptor) || Header.IsZip64; + + protected Stream CreateDecompressionStream(Stream stream, ZipCompressionMethod method) + { + switch (method) + { + case ZipCompressionMethod.None: + { + return stream; + } + case ZipCompressionMethod.Deflate: + { + return new DeflateStream(stream, CompressionMode.Decompress); + } + case ZipCompressionMethod.Deflate64: + { + return new Deflate64Stream(stream, CompressionMode.Decompress); + } + case ZipCompressionMethod.BZip2: + { + return new BZip2Stream(stream, CompressionMode.Decompress, false); + } + case ZipCompressionMethod.LZMA: + { + if (FlagUtility.HasFlag(Header.Flags, HeaderFlags.Encrypted)) + { + throw new NotSupportedException("LZMA with pkware encryption."); + } + var reader = new BinaryReader(stream); + reader.ReadUInt16(); //LZMA version + var props = new byte[reader.ReadUInt16()]; + reader.Read(props, 0, props.Length); + return new LzmaStream(props, stream, + Header.CompressedSize > 0 ? Header.CompressedSize - 4 - props.Length : -1, + FlagUtility.HasFlag(Header.Flags, HeaderFlags.Bit1) + ? 
-1 + : (long)Header.UncompressedSize); + } + case ZipCompressionMethod.PPMd: + { + var props = new byte[2]; + stream.ReadFully(props); + return new PpmdStream(new PpmdProperties(props), stream, false); + } + case ZipCompressionMethod.WinzipAes: + { + ExtraData data = Header.Extra.Where(x => x.Type == ExtraDataType.WinZipAes).SingleOrDefault(); + if (data == null) + { + throw new InvalidFormatException("No Winzip AES extra data found."); + } + if (data.Length != 7) + { + throw new InvalidFormatException("Winzip data length is not 7."); + } + ushort compressedMethod = DataConverter.LittleEndian.GetUInt16(data.DataBytes, 0); + + if (compressedMethod != 0x01 && compressedMethod != 0x02) + { + throw new InvalidFormatException("Unexpected vendor version number for WinZip AES metadata"); + } + + ushort vendorId = DataConverter.LittleEndian.GetUInt16(data.DataBytes, 2); + if (vendorId != 0x4541) + { + throw new InvalidFormatException("Unexpected vendor ID for WinZip AES metadata"); + } + return CreateDecompressionStream(stream, (ZipCompressionMethod)DataConverter.LittleEndian.GetUInt16(data.DataBytes, 5)); + } + default: + { + throw new NotSupportedException("CompressionMethod: " + Header.CompressionMethod); + } + } + } + + protected Stream GetCryptoStream(Stream plainStream) + { + bool isFileEncrypted = FlagUtility.HasFlag(Header.Flags, HeaderFlags.Encrypted); + + if (Header.CompressedSize == 0 && isFileEncrypted) + { + throw new NotSupportedException("Cannot encrypt file with unknown size at start."); + } + + if ((Header.CompressedSize == 0 + && FlagUtility.HasFlag(Header.Flags, HeaderFlags.UsePostDataDescriptor)) + || Header.IsZip64) + { + plainStream = new NonDisposingStream(plainStream); //make sure AES doesn't close + } + else + { + plainStream = new ReadOnlySubStream(plainStream, Header.CompressedSize); //make sure AES doesn't close + } + + if (isFileEncrypted) + { + switch (Header.CompressionMethod) + { + case ZipCompressionMethod.None: + case 
ZipCompressionMethod.Deflate: + case ZipCompressionMethod.Deflate64: + case ZipCompressionMethod.BZip2: + case ZipCompressionMethod.LZMA: + case ZipCompressionMethod.PPMd: + { + return new PkwareTraditionalCryptoStream(plainStream, Header.ComposeEncryptionData(plainStream), CryptoMode.Decrypt); + } + + case ZipCompressionMethod.WinzipAes: + { +#if !NO_FILE + if (Header.WinzipAesEncryptionData != null) + { + return new WinzipAesCryptoStream(plainStream, Header.WinzipAesEncryptionData, Header.CompressedSize - 10); + } +#endif + return plainStream; + } + + default: + { + throw new ArgumentOutOfRangeException(); + } + + } + } + return plainStream; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipHeaderFactory.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipHeaderFactory.cs new file mode 100644 index 0000000000..3810da0ab5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipHeaderFactory.cs @@ -0,0 +1,192 @@ +using System; +using System.IO; +#if !NO_CRYPTO +using System.Linq; +#endif +using SharpCompress.Common.Zip.Headers; +using SharpCompress.IO; +using System.Text; + +namespace SharpCompress.Common.Zip +{ + internal class ZipHeaderFactory + { + internal const uint ENTRY_HEADER_BYTES = 0x04034b50; + internal const uint POST_DATA_DESCRIPTOR = 0x08074b50; + internal const uint DIRECTORY_START_HEADER_BYTES = 0x02014b50; + internal const uint DIRECTORY_END_HEADER_BYTES = 0x06054b50; + internal const uint DIGITAL_SIGNATURE = 0x05054b50; + internal const uint SPLIT_ARCHIVE_HEADER_BYTES = 0x30304b50; + + internal const uint ZIP64_END_OF_CENTRAL_DIRECTORY = 0x06064b50; + internal const uint ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR = 0x07064b50; + + protected LocalEntryHeader _lastEntryHeader; + private readonly string _password; + private readonly StreamingMode _mode; + private readonly ArchiveEncoding _archiveEncoding; + + protected ZipHeaderFactory(StreamingMode mode, string password, 
ArchiveEncoding archiveEncoding) + { + this._mode = mode; + this._password = password; + this._archiveEncoding = archiveEncoding; + } + + protected ZipHeader ReadHeader(uint headerBytes, BinaryReader reader, bool zip64 = false) + { + switch (headerBytes) + { + case ENTRY_HEADER_BYTES: + { + var entryHeader = new LocalEntryHeader(_archiveEncoding); + entryHeader.Read(reader); + LoadHeader(entryHeader, reader.BaseStream); + + _lastEntryHeader = entryHeader; + return entryHeader; + } + case DIRECTORY_START_HEADER_BYTES: + { + var entry = new DirectoryEntryHeader(_archiveEncoding); + entry.Read(reader); + return entry; + } + case POST_DATA_DESCRIPTOR: + { + if (FlagUtility.HasFlag(_lastEntryHeader.Flags, HeaderFlags.UsePostDataDescriptor)) + { + _lastEntryHeader.Crc = reader.ReadUInt32(); + _lastEntryHeader.CompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32(); + _lastEntryHeader.UncompressedSize = zip64 ? (long)reader.ReadUInt64() : reader.ReadUInt32(); + } + else + { + reader.ReadBytes(zip64 ? 
20 : 12); + } + return null; + } + case DIGITAL_SIGNATURE: + return null; + case DIRECTORY_END_HEADER_BYTES: + { + var entry = new DirectoryEndHeader(); + entry.Read(reader); + return entry; + } + case SPLIT_ARCHIVE_HEADER_BYTES: + { + return new SplitHeader(); + } + case ZIP64_END_OF_CENTRAL_DIRECTORY: + { + var entry = new Zip64DirectoryEndHeader(); + entry.Read(reader); + return entry; + } + case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR: + { + var entry = new Zip64DirectoryEndLocatorHeader(); + entry.Read(reader); + return entry; + } + default: + return null; + } + } + + internal static bool IsHeader(uint headerBytes) + { + switch (headerBytes) + { + case ENTRY_HEADER_BYTES: + case DIRECTORY_START_HEADER_BYTES: + case POST_DATA_DESCRIPTOR: + case DIGITAL_SIGNATURE: + case DIRECTORY_END_HEADER_BYTES: + case SPLIT_ARCHIVE_HEADER_BYTES: + case ZIP64_END_OF_CENTRAL_DIRECTORY: + case ZIP64_END_OF_CENTRAL_DIRECTORY_LOCATOR: + return true; + default: + return false; + } + } + + private void LoadHeader(ZipFileEntry entryHeader, Stream stream) + { + if (FlagUtility.HasFlag(entryHeader.Flags, HeaderFlags.Encrypted)) + { + if (!entryHeader.IsDirectory && entryHeader.CompressedSize == 0 && + FlagUtility.HasFlag(entryHeader.Flags, HeaderFlags.UsePostDataDescriptor)) + { + throw new NotSupportedException("SharpCompress cannot currently read non-seekable Zip Streams with encrypted data that has been written in a non-seekable manner."); + } + + if (_password == null) + { + throw new CryptographicException("No password supplied for encrypted zip."); + } + + entryHeader.Password = _password; + + if (entryHeader.CompressionMethod == ZipCompressionMethod.WinzipAes) + { +#if NO_CRYPTO + throw new NotSupportedException("Cannot decrypt Winzip AES with Silverlight or WP7."); +#else + + ExtraData data = entryHeader.Extra.SingleOrDefault(x => x.Type == ExtraDataType.WinZipAes); + if (data != null) + { + var keySize = (WinzipAesKeySize)data.DataBytes[4]; + + var salt = new 
byte[WinzipAesEncryptionData.KeyLengthInBytes(keySize) / 2]; + var passwordVerifyValue = new byte[2]; + stream.Read(salt, 0, salt.Length); + stream.Read(passwordVerifyValue, 0, 2); + entryHeader.WinzipAesEncryptionData = + new WinzipAesEncryptionData(keySize, salt, passwordVerifyValue, _password); + + entryHeader.CompressedSize -= (uint)(salt.Length + 2); + } +#endif + } + } + + if (entryHeader.IsDirectory) + { + return; + } + + //if (FlagUtility.HasFlag(entryHeader.Flags, HeaderFlags.UsePostDataDescriptor)) + //{ + // entryHeader.PackedStream = new ReadOnlySubStream(stream); + //} + //else + //{ + switch (_mode) + { + case StreamingMode.Seekable: + { + entryHeader.DataStartPosition = stream.Position; + stream.Position += entryHeader.CompressedSize; + break; + } + + case StreamingMode.Streaming: + { + entryHeader.PackedStream = stream; + break; + } + + default: + { + throw new InvalidFormatException("Invalid StreamingMode"); + } + } + + //} + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipVolume.cs b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipVolume.cs new file mode 100644 index 0000000000..4f6d52c8a3 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Common/Zip/ZipVolume.cs @@ -0,0 +1,15 @@ +using System.IO; +using SharpCompress.Readers; + +namespace SharpCompress.Common.Zip +{ + public class ZipVolume : Volume + { + public ZipVolume(Stream stream, ReaderOptions readerOptions) + : base(stream, readerOptions) + { + } + + public string Comment { get; internal set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCBase.cs b/BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCBase.cs new file mode 100644 index 0000000000..dfca000eda --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCBase.cs @@ -0,0 +1,220 @@ +// +// ADC.cs +// +// Author: +// Natalia Portillo +// +// Copyright (c) 2016 © Claunia.com +// +// Permission is 
hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+using System; +using System.IO; + +namespace SharpCompress.Compressors.ADC +{ + /// + /// Provides static methods for decompressing Apple Data Compression data + /// + public static class ADCBase + { + private const int PLAIN = 1; + private const int TWO_BYTE = 2; + private const int THREE_BYTE = 3; + + private static int GetChunkType(byte byt) + { + if ((byt & 0x80) == 0x80) + { + return PLAIN; + } + if ((byt & 0x40) == 0x40) + { + return THREE_BYTE; + } + return TWO_BYTE; + } + + private static int GetChunkSize(byte byt) + { + switch (GetChunkType(byt)) + { + case PLAIN: + return (byt & 0x7F) + 1; + case TWO_BYTE: + return ((byt & 0x3F) >> 2) + 3; + case THREE_BYTE: + return (byt & 0x3F) + 4; + default: + return -1; + } + } + + private static int GetOffset(byte[] chunk, int position) + { + switch (GetChunkType(chunk[position])) + { + case PLAIN: + return 0; + case TWO_BYTE: + return ((chunk[position] & 0x03) << 8) + chunk[position + 1]; + case THREE_BYTE: + return (chunk[position + 1] << 8) + chunk[position + 2]; + default: + return -1; + } + } + + /// + /// Decompresses a byte buffer that's compressed with ADC + /// + /// Compressed buffer + /// Buffer to hold decompressed data + /// Max size for decompressed data + /// How many bytes are stored on + public static int Decompress(byte[] input, out byte[] output, int bufferSize = 262144) + { + return Decompress(new MemoryStream(input), out output, bufferSize); + } + + /// + /// Decompresses a stream that's compressed with ADC + /// + /// Stream containing compressed data + /// Buffer to hold decompressed data + /// Max size for decompressed data + /// How many bytes are stored on + public static int Decompress(Stream input, out byte[] output, int bufferSize = 262144) + { + output = null; + + if (input == null || input.Length == 0) + { + return 0; + } + + int start = (int)input.Position; + int position = (int)input.Position; + int chunkSize; + int offset; + int chunkType; + byte[] buffer = new byte[bufferSize]; + 
int outPosition = 0; + bool full = false; + MemoryStream tempMs; + + while (position < input.Length) + { + int readByte = input.ReadByte(); + if (readByte == -1) + { + break; + } + + chunkType = GetChunkType((byte)readByte); + + switch (chunkType) + { + case PLAIN: + chunkSize = GetChunkSize((byte)readByte); + if (outPosition + chunkSize > bufferSize) + { + full = true; + break; + } + input.Read(buffer, outPosition, chunkSize); + outPosition += chunkSize; + position += chunkSize + 1; + break; + case TWO_BYTE: + tempMs = new MemoryStream(); + chunkSize = GetChunkSize((byte)readByte); + tempMs.WriteByte((byte)readByte); + tempMs.WriteByte((byte)input.ReadByte()); + offset = GetOffset(tempMs.ToArray(), 0); + if (outPosition + chunkSize > bufferSize) + { + full = true; + break; + } + if (offset == 0) + { + byte lastByte = buffer[outPosition - 1]; + for (int i = 0; i < chunkSize; i++) + { + buffer[outPosition] = lastByte; + outPosition++; + } + position += 2; + } + else + { + for (int i = 0; i < chunkSize; i++) + { + buffer[outPosition] = buffer[outPosition - offset - 1]; + outPosition++; + } + position += 2; + } + break; + case THREE_BYTE: + tempMs = new MemoryStream(); + chunkSize = GetChunkSize((byte)readByte); + tempMs.WriteByte((byte)readByte); + tempMs.WriteByte((byte)input.ReadByte()); + tempMs.WriteByte((byte)input.ReadByte()); + offset = GetOffset(tempMs.ToArray(), 0); + if (outPosition + chunkSize > bufferSize) + { + full = true; + break; + } + if (offset == 0) + { + byte lastByte = buffer[outPosition - 1]; + for (int i = 0; i < chunkSize; i++) + { + buffer[outPosition] = lastByte; + outPosition++; + } + position += 3; + } + else + { + for (int i = 0; i < chunkSize; i++) + { + buffer[outPosition] = buffer[outPosition - offset - 1]; + outPosition++; + } + position += 3; + } + break; + } + + if (full) + { + break; + } + } + + output = new byte[outPosition]; + Array.Copy(buffer, 0, output, 0, outPosition); + return position - start; + } + } +} \ No newline at end 
of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCStream.cs new file mode 100644 index 0000000000..d8799fdd5b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/ADC/ADCStream.cs @@ -0,0 +1,173 @@ +// +// ADC.cs +// +// Author: +// Natalia Portillo +// +// Copyright (c) 2016 © Claunia.com +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +using System; +using System.IO; + +namespace SharpCompress.Compressors.ADC +{ + /// + /// Provides a forward readable only stream that decompresses ADC data + /// + public class ADCStream : Stream + { + /// + /// This stream holds the compressed data + /// + private readonly Stream _stream; + + /// + /// Is this instance disposed? 
+ /// + private bool _isDisposed; + + /// + /// Position in decompressed data + /// + private long _position; + + /// + /// Buffer with currently used chunk of decompressed data + /// + private byte[] _outBuffer; + + /// + /// Position in buffer of decompressed data + /// + private int _outPosition; + + /// + /// Initializates a stream that decompresses ADC data on the fly + /// + /// Stream that contains the compressed data + /// Must be set to because compression is not implemented + public ADCStream(Stream stream, CompressionMode compressionMode = CompressionMode.Decompress) + { + if (compressionMode == CompressionMode.Compress) + { + throw new NotSupportedException(); + } + + _stream = stream; + } + + public override bool CanRead => _stream.CanRead; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => _position; set => throw new NotSupportedException(); } + + public override void Flush() + { + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + base.Dispose(disposing); + } + + public override int Read(byte[] buffer, int offset, int count) + { + if (count == 0) + { + return 0; + } + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + if (offset < buffer.GetLowerBound(0)) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + if ((offset + count) > buffer.GetLength(0)) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + int size = -1; + + if (_outBuffer == null) + { + size = ADCBase.Decompress(_stream, out _outBuffer); + _outPosition = 0; + } + + int inPosition = offset; + int toCopy = count; + int copied = 0; + + while (_outPosition + toCopy >= _outBuffer.Length) + { + int piece = _outBuffer.Length - _outPosition; + 
Array.Copy(_outBuffer, _outPosition, buffer, inPosition, piece); + inPosition += piece; + copied += piece; + _position += piece; + toCopy -= piece; + size = ADCBase.Decompress(_stream, out _outBuffer); + _outPosition = 0; + if (size == 0 || _outBuffer == null || _outBuffer.Length == 0) + { + return copied; + } + } + + Array.Copy(_outBuffer, _outPosition, buffer, inPosition, toCopy); + _outPosition += toCopy; + _position += toCopy; + copied += toCopy; + return copied; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Constants.cs b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Constants.cs new file mode 100644 index 0000000000..ed5068447b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Constants.cs @@ -0,0 +1,101 @@ +/* + * Copyright 2001,2004-2005 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This package is based on the work done by Keiron Liddle, Aftex Software + * to whom the Ant project is very grateful for his + * great code. 
+ */ + +namespace SharpCompress.Compressors.BZip2 +{ + /** + * Base class for both the compress and decompress classes. + * Holds common arrays, and static data. + * + * @author Keiron Liddle + */ + + internal class BZip2Constants + { + public const int baseBlockSize = 100000; + public const int MAX_ALPHA_SIZE = 258; + public const int MAX_CODE_LEN = 23; + public const int RUNA = 0; + public const int RUNB = 1; + public const int N_GROUPS = 6; + public const int G_SIZE = 50; + public const int N_ITERS = 4; + public const int MAX_SELECTORS = (2 + (900000 / G_SIZE)); + public const int NUM_OVERSHOOT_BYTES = 20; + + public static int[] rNums = + { + 619, 720, 127, 481, 931, 816, 813, 233, 566, 247, + 985, 724, 205, 454, 863, 491, 741, 242, 949, 214, + 733, 859, 335, 708, 621, 574, 73, 654, 730, 472, + 419, 436, 278, 496, 867, 210, 399, 680, 480, 51, + 878, 465, 811, 169, 869, 675, 611, 697, 867, 561, + 862, 687, 507, 283, 482, 129, 807, 591, 733, 623, + 150, 238, 59, 379, 684, 877, 625, 169, 643, 105, + 170, 607, 520, 932, 727, 476, 693, 425, 174, 647, + 73, 122, 335, 530, 442, 853, 695, 249, 445, 515, + 909, 545, 703, 919, 874, 474, 882, 500, 594, 612, + 641, 801, 220, 162, 819, 984, 589, 513, 495, 799, + 161, 604, 958, 533, 221, 400, 386, 867, 600, 782, + 382, 596, 414, 171, 516, 375, 682, 485, 911, 276, + 98, 553, 163, 354, 666, 933, 424, 341, 533, 870, + 227, 730, 475, 186, 263, 647, 537, 686, 600, 224, + 469, 68, 770, 919, 190, 373, 294, 822, 808, 206, + 184, 943, 795, 384, 383, 461, 404, 758, 839, 887, + 715, 67, 618, 276, 204, 918, 873, 777, 604, 560, + 951, 160, 578, 722, 79, 804, 96, 409, 713, 940, + 652, 934, 970, 447, 318, 353, 859, 672, 112, 785, + 645, 863, 803, 350, 139, 93, 354, 99, 820, 908, + 609, 772, 154, 274, 580, 184, 79, 626, 630, 742, + 653, 282, 762, 623, 680, 81, 927, 626, 789, 125, + 411, 521, 938, 300, 821, 78, 343, 175, 128, 250, + 170, 774, 972, 275, 999, 639, 495, 78, 352, 126, + 857, 956, 358, 619, 580, 124, 737, 594, 701, 612, + 669, 
112, 134, 694, 363, 992, 809, 743, 168, 974, + 944, 375, 748, 52, 600, 747, 642, 182, 862, 81, + 344, 805, 988, 739, 511, 655, 814, 334, 249, 515, + 897, 955, 664, 981, 649, 113, 974, 459, 893, 228, + 433, 837, 553, 268, 926, 240, 102, 654, 459, 51, + 686, 754, 806, 760, 493, 403, 415, 394, 687, 700, + 946, 670, 656, 610, 738, 392, 760, 799, 887, 653, + 978, 321, 576, 617, 626, 502, 894, 679, 243, 440, + 680, 879, 194, 572, 640, 724, 926, 56, 204, 700, + 707, 151, 457, 449, 797, 195, 791, 558, 945, 679, + 297, 59, 87, 824, 713, 663, 412, 693, 342, 606, + 134, 108, 571, 364, 631, 212, 174, 643, 304, 329, + 343, 97, 430, 751, 497, 314, 983, 374, 822, 928, + 140, 206, 73, 263, 980, 736, 876, 478, 430, 305, + 170, 514, 364, 692, 829, 82, 855, 953, 676, 246, + 369, 970, 294, 750, 807, 827, 150, 790, 288, 923, + 804, 378, 215, 828, 592, 281, 565, 555, 710, 82, + 896, 831, 547, 261, 524, 462, 293, 465, 502, 56, + 661, 821, 976, 991, 658, 869, 905, 758, 745, 193, + 768, 550, 608, 933, 378, 286, 215, 979, 792, 961, + 61, 688, 793, 644, 986, 403, 106, 366, 905, 644, + 372, 567, 466, 434, 645, 210, 389, 550, 919, 135, + 780, 773, 635, 389, 707, 100, 626, 958, 165, 504, + 920, 176, 193, 713, 857, 265, 203, 50, 668, 108, + 645, 990, 626, 197, 510, 357, 358, 850, 858, 364, + 936, 638 + }; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Stream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Stream.cs new file mode 100644 index 0000000000..c0f040a979 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/BZip2Stream.cs @@ -0,0 +1,111 @@ +using System.IO; + +namespace SharpCompress.Compressors.BZip2 +{ + public class BZip2Stream : Stream + { + private readonly Stream stream; + private bool isDisposed; + + /// + /// Create a BZip2Stream + /// + /// The stream to read from + /// Compression Mode + /// Decompress Concatenated + public BZip2Stream(Stream stream, CompressionMode 
compressionMode, + bool decompressConcatenated) + { + Mode = compressionMode; + if (Mode == CompressionMode.Compress) + { + this.stream = new CBZip2OutputStream(stream); + } + else + { + this.stream = new CBZip2InputStream(stream, decompressConcatenated); + } + } + + public void Finish() + { + (stream as CBZip2OutputStream)?.Finish(); + } + + protected override void Dispose(bool disposing) + { + if (isDisposed) + { + return; + } + isDisposed = true; + if (disposing) + { + stream.Dispose(); + } + } + + public CompressionMode Mode { get; } + + public override bool CanRead => stream.CanRead; + + public override bool CanSeek => stream.CanSeek; + + public override bool CanWrite => stream.CanWrite; + + public override void Flush() + { + stream.Flush(); + } + + public override long Length => stream.Length; + + public override long Position { get => stream.Position; set => stream.Position = value; } + + public override int Read(byte[] buffer, int offset, int count) + { + return stream.Read(buffer, offset, count); + } + + public override int ReadByte() + { + return stream.ReadByte(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + return stream.Seek(offset, origin); + } + + public override void SetLength(long value) + { + stream.SetLength(value); + } + + public override void Write(byte[] buffer, int offset, int count) + { + stream.Write(buffer, offset, count); + } + + public override void WriteByte(byte value) + { + stream.WriteByte(value); + } + + /// + /// Consumes two bytes to test if there is a BZip2 header + /// + /// + /// + public static bool IsBZip2(Stream stream) + { + BinaryReader br = new BinaryReader(stream); + byte[] chars = br.ReadBytes(2); + if (chars.Length < 2 || chars[0] != 'B' || chars[1] != 'Z') + { + return false; + } + return true; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2InputStream.cs 
b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2InputStream.cs new file mode 100644 index 0000000000..68d924b439 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2InputStream.cs @@ -0,0 +1,1094 @@ +using System.IO; + +/* + * Copyright 2001,2004-2005 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This package is based on the work done by Keiron Liddle, Aftex Software + * to whom the Ant project is very grateful for his + * great code. + */ + +namespace SharpCompress.Compressors.BZip2 +{ + /** + * An input stream that decompresses from the BZip2 format (with the file + * header chars) to be read as any other stream. + * + * @author Keiron Liddle + * + * NB: note this class has been modified to read the leading BZ from the + * start of the BZIP2 stream to make it compatible with other PGP programs. 
+ */ + + internal class CBZip2InputStream : Stream + { + private static void Cadvise() + { + //System.out.Println("CRC Error"); + //throw new CCoruptionError(); + } + + private static void BadBGLengths() + { + Cadvise(); + } + + private static void BitStreamEOF() + { + Cadvise(); + } + + private static void CompressedStreamEOF() + { + Cadvise(); + } + + private void MakeMaps() + { + int i; + nInUse = 0; + for (i = 0; i < 256; i++) + { + if (inUse[i]) + { + seqToUnseq[nInUse] = (char)i; + unseqToSeq[i] = (char)nInUse; + nInUse++; + } + } + } + + /* + index of the last char in the block, so + the block size == last + 1. + */ + private int last; + + /* + index in zptr[] of original string after sorting. + */ + private int origPtr; + + /* + always: in the range 0 .. 9. + The current block size is 100000 * this number. + */ + private int blockSize100k; + + private bool blockRandomised; + + private int bsBuff; + private int bsLive; + private readonly CRC mCrc = new CRC(); + + private readonly bool[] inUse = new bool[256]; + private int nInUse; + + private readonly char[] seqToUnseq = new char[256]; + private readonly char[] unseqToSeq = new char[256]; + + private readonly char[] selector = new char[BZip2Constants.MAX_SELECTORS]; + private readonly char[] selectorMtf = new char[BZip2Constants.MAX_SELECTORS]; + + private int[] tt; + private char[] ll8; + + /* + freq table collected to save a pass over the data + during decompression. 
+ */ + private readonly int[] unzftab = new int[256]; + + private readonly int[][] limit = InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE); + private readonly int[][] basev = InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE); + private readonly int[][] perm = InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE); + private readonly int[] minLens = new int[BZip2Constants.N_GROUPS]; + + private Stream bsStream; + + private bool streamEnd; + + private int currentChar = -1; + + private const int START_BLOCK_STATE = 1; + private const int RAND_PART_A_STATE = 2; + private const int RAND_PART_B_STATE = 3; + private const int RAND_PART_C_STATE = 4; + private const int NO_RAND_PART_A_STATE = 5; + private const int NO_RAND_PART_B_STATE = 6; + private const int NO_RAND_PART_C_STATE = 7; + + private int currentState = START_BLOCK_STATE; + + private int storedBlockCRC, storedCombinedCRC; + private int computedBlockCRC, computedCombinedCRC; + private readonly bool decompressConcatenated; + + private int i2, count, chPrev, ch2; + private int i, tPos; + private int rNToGo; + private int rTPos; + private int j2; + private char z; + private bool isDisposed; + + public CBZip2InputStream(Stream zStream, bool decompressConcatenated) + { + this.decompressConcatenated = decompressConcatenated; + ll8 = null; + tt = null; + BsSetStream(zStream); + Initialize(true); + InitBlock(); + SetupBlock(); + } + + protected override void Dispose(bool disposing) + { + if (isDisposed) + { + return; + } + isDisposed = true; + base.Dispose(disposing); + if (bsStream != null) + { + bsStream.Dispose(); + } + } + + internal static int[][] InitIntArray(int n1, int n2) + { + int[][] a = new int[n1][]; + for (int k = 0; k < n1; ++k) + { + a[k] = new int[n2]; + } + return a; + } + + internal static char[][] InitCharArray(int n1, int n2) + { + char[][] a = new char[n1][]; + for (int k = 0; k < n1; ++k) + { + a[k] = new char[n2]; + } + return a; + } + + 
public override int ReadByte() + { + if (streamEnd) + { + return -1; + } + int retChar = currentChar; + switch (currentState) + { + case START_BLOCK_STATE: + break; + case RAND_PART_A_STATE: + break; + case RAND_PART_B_STATE: + SetupRandPartB(); + break; + case RAND_PART_C_STATE: + SetupRandPartC(); + break; + case NO_RAND_PART_A_STATE: + break; + case NO_RAND_PART_B_STATE: + SetupNoRandPartB(); + break; + case NO_RAND_PART_C_STATE: + SetupNoRandPartC(); + break; + default: + break; + } + return retChar; + } + + private bool Initialize(bool isFirstStream) + { + int magic0 = bsStream.ReadByte(); + int magic1 = bsStream.ReadByte(); + int magic2 = bsStream.ReadByte(); + if (magic0 == -1 && !isFirstStream) + { + return false; + } + if (magic0 != 'B' || magic1 != 'Z' || magic2 != 'h') + { + throw new IOException("Not a BZIP2 marked stream"); + } + int magic3 = bsStream.ReadByte(); + if (magic3 < '1' || magic3 > '9') + { + BsFinishedWithStream(); + streamEnd = true; + return false; + } + + SetDecompressStructureSizes(magic3 - '0'); + bsLive = 0; + computedCombinedCRC = 0; + return true; + } + + private void InitBlock() + { + char magic1, magic2, magic3, magic4; + char magic5, magic6; + + while (true) + { + magic1 = BsGetUChar(); + magic2 = BsGetUChar(); + magic3 = BsGetUChar(); + magic4 = BsGetUChar(); + magic5 = BsGetUChar(); + magic6 = BsGetUChar(); + if (magic1 != 0x17 || magic2 != 0x72 || magic3 != 0x45 + || magic4 != 0x38 || magic5 != 0x50 || magic6 != 0x90) + { + break; + } + + if (Complete()) + { + return; + } + } + + if (magic1 != 0x31 || magic2 != 0x41 || magic3 != 0x59 + || magic4 != 0x26 || magic5 != 0x53 || magic6 != 0x59) + { + BadBlockHeader(); + streamEnd = true; + return; + } + + storedBlockCRC = BsGetInt32(); + + if (BsR(1) == 1) + { + blockRandomised = true; + } + else + { + blockRandomised = false; + } + + // currBlockNo++; + GetAndMoveToFrontDecode(); + + mCrc.InitialiseCRC(); + currentState = START_BLOCK_STATE; + } + + private void EndBlock() + { + 
computedBlockCRC = mCrc.GetFinalCRC(); + /* A bad CRC is considered a fatal error. */ + if (storedBlockCRC != computedBlockCRC) + { + CrcError(); + } + + computedCombinedCRC = (computedCombinedCRC << 1) + | (int)(((uint)computedCombinedCRC) >> 31); + computedCombinedCRC ^= computedBlockCRC; + } + + private bool Complete() + { + storedCombinedCRC = BsGetInt32(); + if (storedCombinedCRC != computedCombinedCRC) + { + CrcError(); + } + + bool complete = !decompressConcatenated || !Initialize(false); + if (complete) + { + BsFinishedWithStream(); + streamEnd = true; + } + + // Look for the next .bz2 stream if decompressing + // concatenated files. + return complete; + } + + private static void BlockOverrun() + { + Cadvise(); + } + + private static void BadBlockHeader() + { + Cadvise(); + } + + private static void CrcError() + { + Cadvise(); + } + + private void BsFinishedWithStream() + { + bsStream?.Dispose(); + bsStream = null; + } + + private void BsSetStream(Stream f) + { + bsStream = f; + bsLive = 0; + bsBuff = 0; + } + + private int BsR(int n) + { + int v; + while (bsLive < n) + { + int zzi; + int thech = '\0'; + try + { + thech = (char)bsStream.ReadByte(); + } + catch (IOException) + { + CompressedStreamEOF(); + } + if (thech == '\uffff') + { + CompressedStreamEOF(); + } + zzi = thech; + bsBuff = (bsBuff << 8) | (zzi & 0xff); + bsLive += 8; + } + + v = (bsBuff >> (bsLive - n)) & ((1 << n) - 1); + bsLive -= n; + return v; + } + + private char BsGetUChar() + { + return (char)BsR(8); + } + + private int BsGetint() + { + int u = 0; + u = (u << 8) | BsR(8); + u = (u << 8) | BsR(8); + u = (u << 8) | BsR(8); + u = (u << 8) | BsR(8); + return u; + } + + private int BsGetIntVS(int numBits) + { + return BsR(numBits); + } + + private int BsGetInt32() + { + return BsGetint(); + } + + private void HbCreateDecodeTables(int[] limit, int[] basev, + int[] perm, char[] length, + int minLen, int maxLen, int alphaSize) + { + int pp, i, j, vec; + + pp = 0; + for (i = minLen; i <= 
maxLen; i++) + { + for (j = 0; j < alphaSize; j++) + { + if (length[j] == i) + { + perm[pp] = j; + pp++; + } + } + } + + for (i = 0; i < BZip2Constants.MAX_CODE_LEN; i++) + { + basev[i] = 0; + } + for (i = 0; i < alphaSize; i++) + { + basev[length[i] + 1]++; + } + + for (i = 1; i < BZip2Constants.MAX_CODE_LEN; i++) + { + basev[i] += basev[i - 1]; + } + + for (i = 0; i < BZip2Constants.MAX_CODE_LEN; i++) + { + limit[i] = 0; + } + vec = 0; + + for (i = minLen; i <= maxLen; i++) + { + vec += (basev[i + 1] - basev[i]); + limit[i] = vec - 1; + vec <<= 1; + } + for (i = minLen + 1; i <= maxLen; i++) + { + basev[i] = ((limit[i - 1] + 1) << 1) - basev[i]; + } + } + + private void RecvDecodingTables() + { + char[][] len = InitCharArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE); + int i, j, t, nGroups, nSelectors, alphaSize; + int minLen, maxLen; + bool[] inUse16 = new bool[16]; + + /* Receive the mapping table */ + for (i = 0; i < 16; i++) + { + if (BsR(1) == 1) + { + inUse16[i] = true; + } + else + { + inUse16[i] = false; + } + } + + for (i = 0; i < 256; i++) + { + inUse[i] = false; + } + + for (i = 0; i < 16; i++) + { + if (inUse16[i]) + { + for (j = 0; j < 16; j++) + { + if (BsR(1) == 1) + { + inUse[i * 16 + j] = true; + } + } + } + } + + MakeMaps(); + alphaSize = nInUse + 2; + + /* Now the selectors */ + nGroups = BsR(3); + nSelectors = BsR(15); + for (i = 0; i < nSelectors; i++) + { + j = 0; + while (BsR(1) == 1) + { + j++; + } + selectorMtf[i] = (char)j; + } + + /* Undo the MTF values for the selectors. 
*/ + { + char[] pos = new char[BZip2Constants.N_GROUPS]; + char tmp, v; + for (v = '\0'; v < nGroups; v++) + { + pos[v] = v; + } + + for (i = 0; i < nSelectors; i++) + { + v = selectorMtf[i]; + tmp = pos[v]; + while (v > 0) + { + pos[v] = pos[v - 1]; + v--; + } + pos[0] = tmp; + selector[i] = tmp; + } + } + + /* Now the coding tables */ + for (t = 0; t < nGroups; t++) + { + int curr = BsR(5); + for (i = 0; i < alphaSize; i++) + { + while (BsR(1) == 1) + { + if (BsR(1) == 0) + { + curr++; + } + else + { + curr--; + } + } + len[t][i] = (char)curr; + } + } + + /* Create the Huffman decoding tables */ + for (t = 0; t < nGroups; t++) + { + minLen = 32; + maxLen = 0; + for (i = 0; i < alphaSize; i++) + { + if (len[t][i] > maxLen) + { + maxLen = len[t][i]; + } + if (len[t][i] < minLen) + { + minLen = len[t][i]; + } + } + HbCreateDecodeTables(limit[t], basev[t], perm[t], len[t], minLen, + maxLen, alphaSize); + minLens[t] = minLen; + } + } + + private void GetAndMoveToFrontDecode() + { + char[] yy = new char[256]; + int i, j, nextSym, limitLast; + int EOB, groupNo, groupPos; + + limitLast = BZip2Constants.baseBlockSize * blockSize100k; + origPtr = BsGetIntVS(24); + + RecvDecodingTables(); + EOB = nInUse + 1; + groupNo = -1; + groupPos = 0; + + /* + Setting up the unzftab entries here is not strictly + necessary, but it does save having to do it later + in a separate pass, and so saves a block's worth of + cache misses. 
+ */ + for (i = 0; i <= 255; i++) + { + unzftab[i] = 0; + } + + for (i = 0; i <= 255; i++) + { + yy[i] = (char)i; + } + + last = -1; + + { + int zt, zn, zvec, zj; + if (groupPos == 0) + { + groupNo++; + groupPos = BZip2Constants.G_SIZE; + } + groupPos--; + zt = selector[groupNo]; + zn = minLens[zt]; + zvec = BsR(zn); + while (zvec > limit[zt][zn]) + { + zn++; + { + { + while (bsLive < 1) + { + int zzi; + char thech = '\0'; + try + { + thech = (char)bsStream.ReadByte(); + } + catch (IOException) + { + CompressedStreamEOF(); + } + if (thech == '\uffff') + { + CompressedStreamEOF(); + } + zzi = thech; + bsBuff = (bsBuff << 8) | (zzi & 0xff); + bsLive += 8; + } + } + zj = (bsBuff >> (bsLive - 1)) & 1; + bsLive--; + } + zvec = (zvec << 1) | zj; + } + nextSym = perm[zt][zvec - basev[zt][zn]]; + } + + while (true) + { + if (nextSym == EOB) + { + break; + } + + if (nextSym == BZip2Constants.RUNA || nextSym == BZip2Constants.RUNB) + { + char ch; + int s = -1; + int N = 1; + do + { + if (nextSym == BZip2Constants.RUNA) + { + s = s + (0 + 1) * N; + } + else if (nextSym == BZip2Constants.RUNB) + { + s = s + (1 + 1) * N; + } + N = N * 2; + { + int zt, zn, zvec, zj; + if (groupPos == 0) + { + groupNo++; + groupPos = BZip2Constants.G_SIZE; + } + groupPos--; + zt = selector[groupNo]; + zn = minLens[zt]; + zvec = BsR(zn); + while (zvec > limit[zt][zn]) + { + zn++; + { + { + while (bsLive < 1) + { + int zzi; + char thech = '\0'; + try + { + thech = (char)bsStream.ReadByte(); + } + catch (IOException) + { + CompressedStreamEOF(); + } + if (thech == '\uffff') + { + CompressedStreamEOF(); + } + zzi = thech; + bsBuff = (bsBuff << 8) | (zzi & 0xff); + bsLive += 8; + } + } + zj = (bsBuff >> (bsLive - 1)) & 1; + bsLive--; + } + zvec = (zvec << 1) | zj; + } + nextSym = perm[zt][zvec - basev[zt][zn]]; + } + } + while (nextSym == BZip2Constants.RUNA || nextSym == BZip2Constants.RUNB); + + s++; + ch = seqToUnseq[yy[0]]; + unzftab[ch] += s; + + while (s > 0) + { + last++; + ll8[last] = ch; + 
s--; + } + + if (last >= limitLast) + { + BlockOverrun(); + } + } + else + { + char tmp; + last++; + if (last >= limitLast) + { + BlockOverrun(); + } + + tmp = yy[nextSym - 1]; + unzftab[seqToUnseq[tmp]]++; + ll8[last] = seqToUnseq[tmp]; + + /* + This loop is hammered during decompression, + hence the unrolling. + + for (j = nextSym-1; j > 0; j--) yy[j] = yy[j-1]; + */ + + j = nextSym - 1; + for (; j > 3; j -= 4) + { + yy[j] = yy[j - 1]; + yy[j - 1] = yy[j - 2]; + yy[j - 2] = yy[j - 3]; + yy[j - 3] = yy[j - 4]; + } + for (; j > 0; j--) + { + yy[j] = yy[j - 1]; + } + + yy[0] = tmp; + { + int zt, zn, zvec, zj; + if (groupPos == 0) + { + groupNo++; + groupPos = BZip2Constants.G_SIZE; + } + groupPos--; + zt = selector[groupNo]; + zn = minLens[zt]; + zvec = BsR(zn); + while (zvec > limit[zt][zn]) + { + zn++; + { + { + while (bsLive < 1) + { + int zzi; + char thech = '\0'; + try + { + thech = (char)bsStream.ReadByte(); + } + catch (IOException) + { + CompressedStreamEOF(); + } + zzi = thech; + bsBuff = (bsBuff << 8) | (zzi & 0xff); + bsLive += 8; + } + } + zj = (bsBuff >> (bsLive - 1)) & 1; + bsLive--; + } + zvec = (zvec << 1) | zj; + } + nextSym = perm[zt][zvec - basev[zt][zn]]; + } + } + } + } + + private void SetupBlock() + { + int[] cftab = new int[257]; + char ch; + + cftab[0] = 0; + for (i = 1; i <= 256; i++) + { + cftab[i] = unzftab[i - 1]; + } + for (i = 1; i <= 256; i++) + { + cftab[i] += cftab[i - 1]; + } + + for (i = 0; i <= last; i++) + { + ch = ll8[i]; + tt[cftab[ch]] = i; + cftab[ch]++; + } + cftab = null; + + tPos = tt[origPtr]; + + count = 0; + i2 = 0; + ch2 = 256; /* not a char and not EOF */ + + if (blockRandomised) + { + rNToGo = 0; + rTPos = 0; + SetupRandPartA(); + } + else + { + SetupNoRandPartA(); + } + } + + private void SetupRandPartA() + { + if (i2 <= last) + { + chPrev = ch2; + ch2 = ll8[tPos]; + tPos = tt[tPos]; + if (rNToGo == 0) + { + rNToGo = BZip2Constants.rNums[rTPos]; + rTPos++; + if (rTPos == 512) + { + rTPos = 0; + } + } + rNToGo--; + 
ch2 ^= (rNToGo == 1) ? 1 : 0; + i2++; + + currentChar = ch2; + currentState = RAND_PART_B_STATE; + mCrc.UpdateCRC(ch2); + } + else + { + EndBlock(); + InitBlock(); + SetupBlock(); + } + } + + private void SetupNoRandPartA() + { + if (i2 <= last) + { + chPrev = ch2; + ch2 = ll8[tPos]; + tPos = tt[tPos]; + i2++; + + currentChar = ch2; + currentState = NO_RAND_PART_B_STATE; + mCrc.UpdateCRC(ch2); + } + else + { + EndBlock(); + InitBlock(); + SetupBlock(); + } + } + + private void SetupRandPartB() + { + if (ch2 != chPrev) + { + currentState = RAND_PART_A_STATE; + count = 1; + SetupRandPartA(); + } + else + { + count++; + if (count >= 4) + { + z = ll8[tPos]; + tPos = tt[tPos]; + if (rNToGo == 0) + { + rNToGo = BZip2Constants.rNums[rTPos]; + rTPos++; + if (rTPos == 512) + { + rTPos = 0; + } + } + rNToGo--; + z ^= (char)((rNToGo == 1) ? 1 : 0); + j2 = 0; + currentState = RAND_PART_C_STATE; + SetupRandPartC(); + } + else + { + currentState = RAND_PART_A_STATE; + SetupRandPartA(); + } + } + } + + private void SetupRandPartC() + { + if (j2 < z) + { + currentChar = ch2; + mCrc.UpdateCRC(ch2); + j2++; + } + else + { + currentState = RAND_PART_A_STATE; + i2++; + count = 0; + SetupRandPartA(); + } + } + + private void SetupNoRandPartB() + { + if (ch2 != chPrev) + { + currentState = NO_RAND_PART_A_STATE; + count = 1; + SetupNoRandPartA(); + } + else + { + count++; + if (count >= 4) + { + z = ll8[tPos]; + tPos = tt[tPos]; + currentState = NO_RAND_PART_C_STATE; + j2 = 0; + SetupNoRandPartC(); + } + else + { + currentState = NO_RAND_PART_A_STATE; + SetupNoRandPartA(); + } + } + } + + private void SetupNoRandPartC() + { + if (j2 < z) + { + currentChar = ch2; + mCrc.UpdateCRC(ch2); + j2++; + } + else + { + currentState = NO_RAND_PART_A_STATE; + i2++; + count = 0; + SetupNoRandPartA(); + } + } + + private void SetDecompressStructureSizes(int newSize100k) + { + if (!(0 <= newSize100k && newSize100k <= 9 && 0 <= blockSize100k + && blockSize100k <= 9)) + { + // throw new 
IOException("Invalid block size"); + } + + blockSize100k = newSize100k; + + if (newSize100k == 0) + { + return; + } + + int n = BZip2Constants.baseBlockSize * newSize100k; + ll8 = new char[n]; + tt = new int[n]; + } + + public override void Flush() + { + } + + public override int Read(byte[] buffer, int offset, int count) + { + int c = -1; + int k; + for (k = 0; k < count; ++k) + { + c = ReadByte(); + if (c == -1) + { + break; + } + buffer[k + offset] = (byte)c; + } + return k; + } + + public override long Seek(long offset, SeekOrigin origin) + { + return 0; + } + + public override void SetLength(long value) + { + } + + public override void Write(byte[] buffer, int offset, int count) + { + } + + public override void WriteByte(byte value) + { + } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override long Length => 0; + + public override long Position { get { return 0; } set { } } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2OutputStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2OutputStream.cs new file mode 100644 index 0000000000..e95ddb3696 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CBZip2OutputStream.cs @@ -0,0 +1,1964 @@ +using System.IO; + +/* + * Copyright 2001,2004-2005 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This package is based on the work done by Keiron Liddle, Aftex Software + * to whom the Ant project is very grateful for his + * great code. + */ + +namespace SharpCompress.Compressors.BZip2 +{ + /** + * An output stream that compresses into the BZip2 format (with the file + * header chars) into another stream. + * + * @author Keiron Liddle + * + * TODO: Update to BZip2 1.0.1 + * NB: note this class has been modified to add a leading BZ to the + * start of the BZIP2 stream to make it compatible with other PGP programs. + */ + + internal class CBZip2OutputStream : Stream + { + protected const int SETMASK = (1 << 21); + protected const int CLEARMASK = (~SETMASK); + protected const int GREATER_ICOST = 15; + protected const int LESSER_ICOST = 0; + protected const int SMALL_THRESH = 20; + protected const int DEPTH_THRESH = 10; + + /* + If you are ever unlucky/improbable enough + to get a stack overflow whilst sorting, + increase the following constant and try + again. In practice I have never seen the + stack go above 27 elems, so the following + limit seems very generous. + */ + protected const int QSORT_STACK_SIZE = 1000; + private bool finished; + + private static void Panic() + { + //System.out.Println("panic"); + //throw new CError(); + } + + private void MakeMaps() + { + int i; + nInUse = 0; + for (i = 0; i < 256; i++) + { + if (inUse[i]) + { + seqToUnseq[nInUse] = (char)i; + unseqToSeq[i] = (char)nInUse; + nInUse++; + } + } + } + + protected static void HbMakeCodeLengths(char[] len, int[] freq, + int alphaSize, int maxLen) + { + /* + Nodes and heap entries run from 1. Entry 0 + for both the heap and nodes is a sentinel. 
+ */ + int nNodes, nHeap, n1, n2, i, j, k; + bool tooLong; + + int[] heap = new int[BZip2Constants.MAX_ALPHA_SIZE + 2]; + int[] weight = new int[BZip2Constants.MAX_ALPHA_SIZE * 2]; + int[] parent = new int[BZip2Constants.MAX_ALPHA_SIZE * 2]; + + for (i = 0; i < alphaSize; i++) + { + weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8; + } + + while (true) + { + nNodes = alphaSize; + nHeap = 0; + + heap[0] = 0; + weight[0] = 0; + parent[0] = -2; + + for (i = 1; i <= alphaSize; i++) + { + parent[i] = -1; + nHeap++; + heap[nHeap] = i; + { + int zz, tmp; + zz = nHeap; + tmp = heap[zz]; + while (weight[tmp] < weight[heap[zz >> 1]]) + { + heap[zz] = heap[zz >> 1]; + zz >>= 1; + } + heap[zz] = tmp; + } + } + if (!(nHeap < (BZip2Constants.MAX_ALPHA_SIZE + 2))) + { + Panic(); + } + + while (nHeap > 1) + { + n1 = heap[1]; + heap[1] = heap[nHeap]; + nHeap--; + { + int zz = 0, yy = 0, tmp = 0; + zz = 1; + tmp = heap[zz]; + while (true) + { + yy = zz << 1; + if (yy > nHeap) + { + break; + } + if (yy < nHeap + && weight[heap[yy + 1]] < weight[heap[yy]]) + { + yy++; + } + if (weight[tmp] < weight[heap[yy]]) + { + break; + } + heap[zz] = heap[yy]; + zz = yy; + } + heap[zz] = tmp; + } + n2 = heap[1]; + heap[1] = heap[nHeap]; + nHeap--; + { + int zz = 0, yy = 0, tmp = 0; + zz = 1; + tmp = heap[zz]; + while (true) + { + yy = zz << 1; + if (yy > nHeap) + { + break; + } + if (yy < nHeap + && weight[heap[yy + 1]] < weight[heap[yy]]) + { + yy++; + } + if (weight[tmp] < weight[heap[yy]]) + { + break; + } + heap[zz] = heap[yy]; + zz = yy; + } + heap[zz] = tmp; + } + nNodes++; + parent[n1] = parent[n2] = nNodes; + + weight[nNodes] = (int)((uint)((weight[n1] & 0xffffff00) + + (weight[n2] & 0xffffff00)) + | (uint)(1 + (((weight[n1] & 0x000000ff) > + (weight[n2] & 0x000000ff)) + ? 
(weight[n1] & 0x000000ff) + : (weight[n2] & 0x000000ff)))); + + parent[nNodes] = -1; + nHeap++; + heap[nHeap] = nNodes; + { + int zz = 0, tmp = 0; + zz = nHeap; + tmp = heap[zz]; + while (weight[tmp] < weight[heap[zz >> 1]]) + { + heap[zz] = heap[zz >> 1]; + zz >>= 1; + } + heap[zz] = tmp; + } + } + if (!(nNodes < (BZip2Constants.MAX_ALPHA_SIZE * 2))) + { + Panic(); + } + + tooLong = false; + for (i = 1; i <= alphaSize; i++) + { + j = 0; + k = i; + while (parent[k] >= 0) + { + k = parent[k]; + j++; + } + len[i - 1] = (char)j; + if (j > maxLen) + { + tooLong = true; + } + } + + if (!tooLong) + { + break; + } + + for (i = 1; i < alphaSize; i++) + { + j = weight[i] >> 8; + j = 1 + (j / 2); + weight[i] = j << 8; + } + } + } + + /* + index of the last char in the block, so + the block size == last + 1. + */ + private int last; + + /* + index in zptr[] of original string after sorting. + */ + private int origPtr; + + /* + always: in the range 0 .. 9. + The current block size is 100000 * this number. + */ + private readonly int blockSize100k; + + private bool blockRandomised; + + private int bytesOut; + private int bsBuff; + private int bsLive; + private readonly CRC mCrc = new CRC(); + + private readonly bool[] inUse = new bool[256]; + private int nInUse; + + private readonly char[] seqToUnseq = new char[256]; + private readonly char[] unseqToSeq = new char[256]; + + private readonly char[] selector = new char[BZip2Constants.MAX_SELECTORS]; + private readonly char[] selectorMtf = new char[BZip2Constants.MAX_SELECTORS]; + + private char[] block; + private int[] quadrant; + private int[] zptr; + private short[] szptr; + private int[] ftab; + + private int nMTF; + + private readonly int[] mtfFreq = new int[BZip2Constants.MAX_ALPHA_SIZE]; + + /* + * Used when sorting. If too many long comparisons + * happen, we stop sorting, randomise the block + * slightly, and try again. 
+ */ + private readonly int workFactor; + private int workDone; + private int workLimit; + private bool firstAttempt; + private int nBlocksRandomised; + + private int currentChar = -1; + private int runLength; + + public CBZip2OutputStream(Stream inStream) + : this(inStream, 9) + { + } + + public CBZip2OutputStream(Stream inStream, int inBlockSize) + { + block = null; + quadrant = null; + zptr = null; + ftab = null; + + inStream.WriteByte((byte)'B'); + inStream.WriteByte((byte)'Z'); + + BsSetStream(inStream); + + workFactor = 50; + if (inBlockSize > 9) + { + inBlockSize = 9; + } + if (inBlockSize < 1) + { + inBlockSize = 1; + } + blockSize100k = inBlockSize; + AllocateCompressStructures(); + Initialize(); + InitBlock(); + } + + /** + * + * modified by Oliver Merkel, 010128 + * + */ + + public override void WriteByte(byte bv) + { + int b = (256 + bv) % 256; + if (currentChar != -1) + { + if (currentChar == b) + { + runLength++; + if (runLength > 254) + { + WriteRun(); + currentChar = -1; + runLength = 0; + } + } + else + { + WriteRun(); + runLength = 1; + currentChar = b; + } + } + else + { + currentChar = b; + runLength++; + } + } + + private void WriteRun() + { + if (last < allowableBlockSize) + { + inUse[currentChar] = true; + for (int i = 0; i < runLength; i++) + { + mCrc.UpdateCRC((char)currentChar); + } + switch (runLength) + { + case 1: + last++; + block[last + 1] = (char)currentChar; + break; + case 2: + last++; + block[last + 1] = (char)currentChar; + last++; + block[last + 1] = (char)currentChar; + break; + case 3: + last++; + block[last + 1] = (char)currentChar; + last++; + block[last + 1] = (char)currentChar; + last++; + block[last + 1] = (char)currentChar; + break; + default: + inUse[runLength - 4] = true; + last++; + block[last + 1] = (char)currentChar; + last++; + block[last + 1] = (char)currentChar; + last++; + block[last + 1] = (char)currentChar; + last++; + block[last + 1] = (char)currentChar; + last++; + block[last + 1] = (char)(runLength - 4); + 
break; + } + } + else + { + EndBlock(); + InitBlock(); + WriteRun(); + } + } + + private bool disposed; + + protected override void Dispose(bool disposing) + { + if (disposing) + { + if (disposed) + { + return; + } + + Finish(); + + disposed = true; + base.Dispose(); + bsStream?.Dispose(); + bsStream = null; + } + } + + public void Finish() + { + if (finished) + { + return; + } + + if (runLength > 0) + { + WriteRun(); + } + currentChar = -1; + EndBlock(); + EndCompression(); + finished = true; + Flush(); + } + + public override void Flush() + { + bsStream.Flush(); + } + + private int blockCRC, combinedCRC; + + private void Initialize() + { + bytesOut = 0; + nBlocksRandomised = 0; + + /* Write `magic' bytes h indicating file-format == huffmanised, + followed by a digit indicating blockSize100k. + */ + BsPutUChar('h'); + BsPutUChar('0' + blockSize100k); + + combinedCRC = 0; + } + + private int allowableBlockSize; + + private void InitBlock() + { + // blockNo++; + mCrc.InitialiseCRC(); + last = -1; + + // ch = 0; + + for (int i = 0; i < 256; i++) + { + inUse[i] = false; + } + + /* 20 is just a paranoia constant */ + allowableBlockSize = BZip2Constants.baseBlockSize * blockSize100k - 20; + } + + private void EndBlock() + { + blockCRC = mCrc.GetFinalCRC(); + combinedCRC = (combinedCRC << 1) | (int)(((uint)combinedCRC) >> 31); + combinedCRC ^= blockCRC; + + /* sort the block and establish posn of original string */ + DoReversibleTransformation(); + + /* + A 6-byte block header, the value chosen arbitrarily + as 0x314159265359 :-). A 32 bit value does not really + give a strong enough guarantee that the value will not + appear by chance in the compressed datastream. Worst-case + probability of this event, for a 900k block, is about + 2.0e-3 for 32 bits, 1.0e-5 for 40 bits and 4.0e-8 for 48 bits. + For a compressed file of size 100Gb -- about 100000 blocks -- + only a 48-bit marker will do. 
NB: normal compression/ + decompression do *not* rely on these statistical properties. + They are only important when trying to recover blocks from + damaged files. + */ + BsPutUChar(0x31); + BsPutUChar(0x41); + BsPutUChar(0x59); + BsPutUChar(0x26); + BsPutUChar(0x53); + BsPutUChar(0x59); + + /* Now the block's CRC, so it is in a known place. */ + BsPutint(blockCRC); + + /* Now a single bit indicating randomisation. */ + if (blockRandomised) + { + BsW(1, 1); + nBlocksRandomised++; + } + else + { + BsW(1, 0); + } + + /* Finally, block's contents proper. */ + MoveToFrontCodeAndSend(); + } + + private void EndCompression() + { + /* + Now another magic 48-bit number, 0x177245385090, to + indicate the end of the last block. (Sqrt(pi), if + you want to know. I did want to use e, but it contains + too much repetition -- 27 18 28 18 28 46 -- for me + to feel statistically comfortable. Call me paranoid.) + */ + BsPutUChar(0x17); + BsPutUChar(0x72); + BsPutUChar(0x45); + BsPutUChar(0x38); + BsPutUChar(0x50); + BsPutUChar(0x90); + + BsPutint(combinedCRC); + + BsFinishedWithStream(); + } + + private void HbAssignCodes(int[] code, char[] length, int minLen, + int maxLen, int alphaSize) + { + int n, vec, i; + + vec = 0; + for (n = minLen; n <= maxLen; n++) + { + for (i = 0; i < alphaSize; i++) + { + if (length[i] == n) + { + code[i] = vec; + vec++; + } + } + ; + vec <<= 1; + } + } + + private void BsSetStream(Stream f) + { + bsStream = f; + bsLive = 0; + bsBuff = 0; + bytesOut = 0; + } + + private void BsFinishedWithStream() + { + while (bsLive > 0) + { + int ch = (bsBuff >> 24); + try + { + bsStream.WriteByte((byte)ch); // write 8-bit + } + catch (IOException e) + { + throw e; + } + bsBuff <<= 8; + bsLive -= 8; + bytesOut++; + } + } + + private void BsW(int n, int v) + { + while (bsLive >= 8) + { + int ch = (bsBuff >> 24); + try + { + bsStream.WriteByte((byte)ch); // write 8-bit + } + catch (IOException e) + { + throw e; + } + bsBuff <<= 8; + bsLive -= 8; + bytesOut++; + } + 
bsBuff |= (v << (32 - bsLive - n)); + bsLive += n; + } + + private void BsPutUChar(int c) + { + BsW(8, c); + } + + private void BsPutint(int u) + { + BsW(8, (u >> 24) & 0xff); + BsW(8, (u >> 16) & 0xff); + BsW(8, (u >> 8) & 0xff); + BsW(8, u & 0xff); + } + + private void BsPutIntVS(int numBits, int c) + { + BsW(numBits, c); + } + + private void SendMTFValues() + { + char[][] len = CBZip2InputStream.InitCharArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE); + + int v, t, i, j, gs, ge, totc, bt, bc, iter; + int nSelectors = 0, alphaSize, minLen, maxLen, selCtr; + int nGroups; //, nBytes; + + alphaSize = nInUse + 2; + for (t = 0; t < BZip2Constants.N_GROUPS; t++) + { + for (v = 0; v < alphaSize; v++) + { + len[t][v] = (char)GREATER_ICOST; + } + } + + /* Decide how many coding tables to use */ + if (nMTF <= 0) + { + Panic(); + } + + if (nMTF < 200) + { + nGroups = 2; + } + else if (nMTF < 600) + { + nGroups = 3; + } + else if (nMTF < 1200) + { + nGroups = 4; + } + else if (nMTF < 2400) + { + nGroups = 5; + } + else + { + nGroups = 6; + } + + /* Generate an initial set of coding tables */ + { + int nPart, remF, tFreq, aFreq; + + nPart = nGroups; + remF = nMTF; + gs = 0; + while (nPart > 0) + { + tFreq = remF / nPart; + ge = gs - 1; + aFreq = 0; + while (aFreq < tFreq && ge < alphaSize - 1) + { + ge++; + aFreq += mtfFreq[ge]; + } + + if (ge > gs && nPart != nGroups && nPart != 1 + && ((nGroups - nPart) % 2 == 1)) + { + aFreq -= mtfFreq[ge]; + ge--; + } + + for (v = 0; v < alphaSize; v++) + { + if (v >= gs && v <= ge) + { + len[nPart - 1][v] = (char)LESSER_ICOST; + } + else + { + len[nPart - 1][v] = (char)GREATER_ICOST; + } + } + + nPart--; + gs = ge + 1; + remF -= aFreq; + } + } + + int[][] rfreq = CBZip2InputStream.InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE); + int[] fave = new int[BZip2Constants.N_GROUPS]; + short[] cost = new short[BZip2Constants.N_GROUPS]; + /* + Iterate up to N_ITERS times to improve the tables. 
+ */ + for (iter = 0; iter < BZip2Constants.N_ITERS; iter++) + { + for (t = 0; t < nGroups; t++) + { + fave[t] = 0; + } + + for (t = 0; t < nGroups; t++) + { + for (v = 0; v < alphaSize; v++) + { + rfreq[t][v] = 0; + } + } + + nSelectors = 0; + totc = 0; + gs = 0; + while (true) + { + /* Set group start & end marks. */ + if (gs >= nMTF) + { + break; + } + ge = gs + BZip2Constants.G_SIZE - 1; + if (ge >= nMTF) + { + ge = nMTF - 1; + } + + /* + Calculate the cost of this group as coded + by each of the coding tables. + */ + for (t = 0; t < nGroups; t++) + { + cost[t] = 0; + } + + if (nGroups == 6) + { + short cost0, cost1, cost2, cost3, cost4, cost5; + cost0 = cost1 = cost2 = cost3 = cost4 = cost5 = 0; + for (i = gs; i <= ge; i++) + { + short icv = szptr[i]; + cost0 += (short)len[0][icv]; + cost1 += (short)len[1][icv]; + cost2 += (short)len[2][icv]; + cost3 += (short)len[3][icv]; + cost4 += (short)len[4][icv]; + cost5 += (short)len[5][icv]; + } + cost[0] = cost0; + cost[1] = cost1; + cost[2] = cost2; + cost[3] = cost3; + cost[4] = cost4; + cost[5] = cost5; + } + else + { + for (i = gs; i <= ge; i++) + { + short icv = szptr[i]; + for (t = 0; t < nGroups; t++) + { + cost[t] += (short)len[t][icv]; + } + } + } + + /* + Find the coding table which is best for this group, + and record its identity in the selector table. + */ + bc = 999999999; + bt = -1; + for (t = 0; t < nGroups; t++) + { + if (cost[t] < bc) + { + bc = cost[t]; + bt = t; + } + } + ; + totc += bc; + fave[bt]++; + selector[nSelectors] = (char)bt; + nSelectors++; + + /* + Increment the symbol frequencies for the selected table. + */ + for (i = gs; i <= ge; i++) + { + rfreq[bt][szptr[i]]++; + } + + gs = ge + 1; + } + + /* + Recompute the tables based on the accumulated frequencies. 
+ */ + for (t = 0; t < nGroups; t++) + { + HbMakeCodeLengths(len[t], rfreq[t], alphaSize, 20); + } + } + + rfreq = null; + fave = null; + cost = null; + + if (!(nGroups < 8)) + { + Panic(); + } + if (!(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZip2Constants.G_SIZE)))) + { + Panic(); + } + + /* Compute MTF values for the selectors. */ + { + char[] pos = new char[BZip2Constants.N_GROUPS]; + char ll_i, tmp2, tmp; + for (i = 0; i < nGroups; i++) + { + pos[i] = (char)i; + } + for (i = 0; i < nSelectors; i++) + { + ll_i = selector[i]; + j = 0; + tmp = pos[j]; + while (ll_i != tmp) + { + j++; + tmp2 = tmp; + tmp = pos[j]; + pos[j] = tmp2; + } + pos[0] = tmp; + selectorMtf[i] = (char)j; + } + } + + int[][] code = CBZip2InputStream.InitIntArray(BZip2Constants.N_GROUPS, BZip2Constants.MAX_ALPHA_SIZE); + + /* Assign actual codes for the tables. */ + for (t = 0; t < nGroups; t++) + { + minLen = 32; + maxLen = 0; + for (i = 0; i < alphaSize; i++) + { + if (len[t][i] > maxLen) + { + maxLen = len[t][i]; + } + if (len[t][i] < minLen) + { + minLen = len[t][i]; + } + } + if (maxLen > 20) + { + Panic(); + } + if (minLen < 1) + { + Panic(); + } + HbAssignCodes(code[t], len[t], minLen, maxLen, alphaSize); + } + + /* Transmit the mapping table. */ + { + bool[] inUse16 = new bool[16]; + for (i = 0; i < 16; i++) + { + inUse16[i] = false; + for (j = 0; j < 16; j++) + { + if (inUse[i * 16 + j]) + { + inUse16[i] = true; + } + } + } + + //nBytes = bytesOut; + for (i = 0; i < 16; i++) + { + if (inUse16[i]) + { + BsW(1, 1); + } + else + { + BsW(1, 0); + } + } + + for (i = 0; i < 16; i++) + { + if (inUse16[i]) + { + for (j = 0; j < 16; j++) + { + if (inUse[i * 16 + j]) + { + BsW(1, 1); + } + else + { + BsW(1, 0); + } + } + } + } + } + + /* Now the selectors. */ + //nBytes = bytesOut; + BsW(3, nGroups); + BsW(15, nSelectors); + for (i = 0; i < nSelectors; i++) + { + for (j = 0; j < selectorMtf[i]; j++) + { + BsW(1, 1); + } + BsW(1, 0); + } + + /* Now the coding tables. 
*/ + //nBytes = bytesOut; + + for (t = 0; t < nGroups; t++) + { + int curr = len[t][0]; + BsW(5, curr); + for (i = 0; i < alphaSize; i++) + { + while (curr < len[t][i]) + { + BsW(2, 2); + curr++; /* 10 */ + } + while (curr > len[t][i]) + { + BsW(2, 3); + curr--; /* 11 */ + } + BsW(1, 0); + } + } + + /* And finally, the block data proper */ + //nBytes = bytesOut; + selCtr = 0; + gs = 0; + while (true) + { + if (gs >= nMTF) + { + break; + } + ge = gs + BZip2Constants.G_SIZE - 1; + if (ge >= nMTF) + { + ge = nMTF - 1; + } + for (i = gs; i <= ge; i++) + { + BsW(len[selector[selCtr]][szptr[i]], + code[selector[selCtr]][szptr[i]]); + } + + gs = ge + 1; + selCtr++; + } + if (!(selCtr == nSelectors)) + { + Panic(); + } + } + + private void MoveToFrontCodeAndSend() + { + BsPutIntVS(24, origPtr); + GenerateMTFValues(); + SendMTFValues(); + } + + private Stream bsStream; + + private void SimpleSort(int lo, int hi, int d) + { + int i, j, h, bigN, hp; + int v; + + bigN = hi - lo + 1; + if (bigN < 2) + { + return; + } + + hp = 0; + while (incs[hp] < bigN) + { + hp++; + } + hp--; + + for (; hp >= 0; hp--) + { + h = incs[hp]; + + i = lo + h; + while (true) + { + /* copy 1 */ + if (i > hi) + { + break; + } + v = zptr[i]; + j = i; + while (FullGtU(zptr[j - h] + d, v + d)) + { + zptr[j] = zptr[j - h]; + j = j - h; + if (j <= (lo + h - 1)) + { + break; + } + } + zptr[j] = v; + i++; + + /* copy 2 */ + if (i > hi) + { + break; + } + v = zptr[i]; + j = i; + while (FullGtU(zptr[j - h] + d, v + d)) + { + zptr[j] = zptr[j - h]; + j = j - h; + if (j <= (lo + h - 1)) + { + break; + } + } + zptr[j] = v; + i++; + + /* copy 3 */ + if (i > hi) + { + break; + } + v = zptr[i]; + j = i; + while (FullGtU(zptr[j - h] + d, v + d)) + { + zptr[j] = zptr[j - h]; + j = j - h; + if (j <= (lo + h - 1)) + { + break; + } + } + zptr[j] = v; + i++; + + if (workDone > workLimit && firstAttempt) + { + return; + } + } + } + } + + private void Vswap(int p1, int p2, int n) + { + int temp = 0; + while (n > 0) + { + 
temp = zptr[p1]; + zptr[p1] = zptr[p2]; + zptr[p2] = temp; + p1++; + p2++; + n--; + } + } + + private char Med3(char a, char b, char c) + { + char t; + if (a > b) + { + t = a; + a = b; + b = t; + } + if (b > c) + { + t = b; + b = c; + c = t; + } + if (a > b) + { + b = a; + } + return b; + } + + internal class StackElem + { + internal int ll; + internal int hh; + internal int dd; + } + + private void QSort3(int loSt, int hiSt, int dSt) + { + int unLo, unHi, ltLo, gtHi, med, n, m; + int sp, lo, hi, d; + StackElem[] stack = new StackElem[QSORT_STACK_SIZE]; + for (int count = 0; count < QSORT_STACK_SIZE; count++) + { + stack[count] = new StackElem(); + } + + sp = 0; + + stack[sp].ll = loSt; + stack[sp].hh = hiSt; + stack[sp].dd = dSt; + sp++; + + while (sp > 0) + { + if (sp >= QSORT_STACK_SIZE) + { + Panic(); + } + + sp--; + lo = stack[sp].ll; + hi = stack[sp].hh; + d = stack[sp].dd; + + if (hi - lo < SMALL_THRESH || d > DEPTH_THRESH) + { + SimpleSort(lo, hi, d); + if (workDone > workLimit && firstAttempt) + { + return; + } + continue; + } + + med = Med3(block[zptr[lo] + d + 1], + block[zptr[hi] + d + 1], + block[zptr[(lo + hi) >> 1] + d + 1]); + + unLo = ltLo = lo; + unHi = gtHi = hi; + + while (true) + { + while (true) + { + if (unLo > unHi) + { + break; + } + n = block[zptr[unLo] + d + 1] - med; + if (n == 0) + { + int temp = 0; + temp = zptr[unLo]; + zptr[unLo] = zptr[ltLo]; + zptr[ltLo] = temp; + ltLo++; + unLo++; + continue; + } + ; + if (n > 0) + { + break; + } + unLo++; + } + while (true) + { + if (unLo > unHi) + { + break; + } + n = block[zptr[unHi] + d + 1] - med; + if (n == 0) + { + int temp = 0; + temp = zptr[unHi]; + zptr[unHi] = zptr[gtHi]; + zptr[gtHi] = temp; + gtHi--; + unHi--; + continue; + } + ; + if (n < 0) + { + break; + } + unHi--; + } + if (unLo > unHi) + { + break; + } + int tempx = zptr[unLo]; + zptr[unLo] = zptr[unHi]; + zptr[unHi] = tempx; + unLo++; + unHi--; + } + + if (gtHi < ltLo) + { + stack[sp].ll = lo; + stack[sp].hh = hi; + 
stack[sp].dd = d + 1; + sp++; + continue; + } + + n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo) : (unLo - ltLo); + Vswap(lo, unLo - n, n); + m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi) : (gtHi - unHi); + Vswap(unLo, hi - m + 1, m); + + n = lo + unLo - ltLo - 1; + m = hi - (gtHi - unHi) + 1; + + stack[sp].ll = lo; + stack[sp].hh = n; + stack[sp].dd = d; + sp++; + + stack[sp].ll = n + 1; + stack[sp].hh = m - 1; + stack[sp].dd = d + 1; + sp++; + + stack[sp].ll = m; + stack[sp].hh = hi; + stack[sp].dd = d; + sp++; + } + } + + private void MainSort() + { + int i, j, ss, sb; + int[] runningOrder = new int[256]; + int[] copy = new int[256]; + bool[] bigDone = new bool[256]; + int c1, c2; + int numQSorted; + + /* + In the various block-sized structures, live data runs + from 0 to last+NUM_OVERSHOOT_BYTES inclusive. First, + set up the overshoot area for block. + */ + + // if (verbosity >= 4) fprintf ( stderr, " sort initialise ...\n" ); + for (i = 0; i < BZip2Constants.NUM_OVERSHOOT_BYTES; i++) + { + block[last + i + 2] = block[(i % (last + 1)) + 1]; + } + for (i = 0; i <= last + BZip2Constants.NUM_OVERSHOOT_BYTES; i++) + { + quadrant[i] = 0; + } + + block[0] = block[last + 1]; + + if (last < 4000) + { + /* + Use SimpleSort(), since the full sorting mechanism + has quite a large constant overhead. 
+ */ + for (i = 0; i <= last; i++) + { + zptr[i] = i; + } + firstAttempt = false; + workDone = workLimit = 0; + SimpleSort(0, last, 0); + } + else + { + numQSorted = 0; + for (i = 0; i <= 255; i++) + { + bigDone[i] = false; + } + + for (i = 0; i <= 65536; i++) + { + ftab[i] = 0; + } + + c1 = block[0]; + for (i = 0; i <= last; i++) + { + c2 = block[i + 1]; + ftab[(c1 << 8) + c2]++; + c1 = c2; + } + + for (i = 1; i <= 65536; i++) + { + ftab[i] += ftab[i - 1]; + } + + c1 = block[1]; + for (i = 0; i < last; i++) + { + c2 = block[i + 2]; + j = (c1 << 8) + c2; + c1 = c2; + ftab[j]--; + zptr[ftab[j]] = i; + } + + j = ((block[last + 1]) << 8) + (block[1]); + ftab[j]--; + zptr[ftab[j]] = last; + + /* + Now ftab contains the first loc of every small bucket. + Calculate the running order, from smallest to largest + big bucket. + */ + + for (i = 0; i <= 255; i++) + { + runningOrder[i] = i; + } + + { + int vv; + int h = 1; + do + { + h = 3 * h + 1; + } + while (h <= 256); + do + { + h = h / 3; + for (i = h; i <= 255; i++) + { + vv = runningOrder[i]; + j = i; + while ((ftab[((runningOrder[j - h]) + 1) << 8] + - ftab[(runningOrder[j - h]) << 8]) > + (ftab[((vv) + 1) << 8] - ftab[(vv) << 8])) + { + runningOrder[j] = runningOrder[j - h]; + j = j - h; + if (j <= (h - 1)) + { + break; + } + } + runningOrder[j] = vv; + } + } + while (h != 1); + } + + /* + The main sorting loop. + */ + for (i = 0; i <= 255; i++) + { + /* + Process big buckets, starting with the least full. + */ + ss = runningOrder[i]; + + /* + Complete the big bucket [ss] by quicksorting + any unsorted small buckets [ss, j]. Hopefully + previous pointer-scanning phases have already + completed many of the small buckets [ss, j], so + we don't have to sort them at all. 
+ */ + for (j = 0; j <= 255; j++) + { + sb = (ss << 8) + j; + if (!((ftab[sb] & SETMASK) == SETMASK)) + { + int lo = ftab[sb] & CLEARMASK; + int hi = (ftab[sb + 1] & CLEARMASK) - 1; + if (hi > lo) + { + QSort3(lo, hi, 2); + numQSorted += (hi - lo + 1); + if (workDone > workLimit && firstAttempt) + { + return; + } + } + ftab[sb] |= SETMASK; + } + } + + /* + The ss big bucket is now done. Record this fact, + and update the quadrant descriptors. Remember to + update quadrants in the overshoot area too, if + necessary. The "if (i < 255)" test merely skips + this updating for the last bucket processed, since + updating for the last bucket is pointless. + */ + bigDone[ss] = true; + + if (i < 255) + { + int bbStart = ftab[ss << 8] & CLEARMASK; + int bbSize = (ftab[(ss + 1) << 8] & CLEARMASK) - bbStart; + int shifts = 0; + + while ((bbSize >> shifts) > 65534) + { + shifts++; + } + + for (j = 0; j < bbSize; j++) + { + int a2update = zptr[bbStart + j]; + int qVal = (j >> shifts); + quadrant[a2update] = qVal; + if (a2update < BZip2Constants.NUM_OVERSHOOT_BYTES) + { + quadrant[a2update + last + 1] = qVal; + } + } + + if (!(((bbSize - 1) >> shifts) <= 65535)) + { + Panic(); + } + } + + /* + Now scan this big bucket so as to synthesise the + sorted order for small buckets [t, ss] for all t != ss. + */ + for (j = 0; j <= 255; j++) + { + copy[j] = ftab[(j << 8) + ss] & CLEARMASK; + } + + for (j = ftab[ss << 8] & CLEARMASK; + j < (ftab[(ss + 1) << 8] & CLEARMASK); + j++) + { + c1 = block[zptr[j]]; + if (!bigDone[c1]) + { + zptr[copy[c1]] = zptr[j] == 0 ? 
last : zptr[j] - 1; + copy[c1]++; + } + } + + for (j = 0; j <= 255; j++) + { + ftab[(j << 8) + ss] |= SETMASK; + } + } + } + } + + private void RandomiseBlock() + { + int i; + int rNToGo = 0; + int rTPos = 0; + for (i = 0; i < 256; i++) + { + inUse[i] = false; + } + + for (i = 0; i <= last; i++) + { + if (rNToGo == 0) + { + rNToGo = (char)BZip2Constants.rNums[rTPos]; + rTPos++; + if (rTPos == 512) + { + rTPos = 0; + } + } + rNToGo--; + block[i + 1] ^= (char)((rNToGo == 1) ? 1 : 0); + + // handle 16 bit signed numbers + block[i + 1] &= (char)0xFF; + + inUse[block[i + 1]] = true; + } + } + + private void DoReversibleTransformation() + { + int i; + + workLimit = workFactor * last; + workDone = 0; + blockRandomised = false; + firstAttempt = true; + + MainSort(); + + if (workDone > workLimit && firstAttempt) + { + RandomiseBlock(); + workLimit = workDone = 0; + blockRandomised = true; + firstAttempt = false; + MainSort(); + } + + origPtr = -1; + for (i = 0; i <= last; i++) + { + if (zptr[i] == 0) + { + origPtr = i; + break; + } + } + ; + + if (origPtr == -1) + { + Panic(); + } + } + + private bool FullGtU(int i1, int i2) + { + int k; + char c1, c2; + int s1, s2; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + i1++; + i2++; + + k = last + 1; + + do + { + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return 
(s1 > s2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return (s1 > s2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return (s1 > s2); + } + i1++; + i2++; + + c1 = block[i1 + 1]; + c2 = block[i2 + 1]; + if (c1 != c2) + { + return (c1 > c2); + } + s1 = quadrant[i1]; + s2 = quadrant[i2]; + if (s1 != s2) + { + return (s1 > s2); + } + i1++; + i2++; + + if (i1 > last) + { + i1 -= last; + i1--; + } + ; + if (i2 > last) + { + i2 -= last; + i2--; + } + ; + + k -= 4; + workDone++; + } + while (k >= 0); + + return false; + } + + /* + Knuth's increments seem to work better + than Incerpi-Sedgewick here. Possibly + because the number of elems to sort is + usually small, typically <= 20. + */ + + private readonly int[] incs = + { + 1, 4, 13, 40, 121, 364, 1093, 3280, + 9841, 29524, 88573, 265720, + 797161, 2391484 + }; + + private void AllocateCompressStructures() + { + int n = BZip2Constants.baseBlockSize * blockSize100k; + block = new char[(n + 1 + BZip2Constants.NUM_OVERSHOOT_BYTES)]; + quadrant = new int[(n + BZip2Constants.NUM_OVERSHOOT_BYTES)]; + zptr = new int[n]; + ftab = new int[65537]; + + if (block == null || quadrant == null || zptr == null + || ftab == null) + { + //int totalDraw = (n + 1 + NUM_OVERSHOOT_BYTES) + (n + NUM_OVERSHOOT_BYTES) + n + 65537; + //compressOutOfMemory ( totalDraw, n ); + } + + /* + The back end needs a place to store the MTF values + whilst it calculates the coding tables. We could + put them in the zptr array. However, these values + will fit in a short, so we overlay szptr at the + start of zptr, in the hope of reducing the number + of cache misses induced by the multiple traversals + of the MTF values when calculating coding tables. + Seems to improve compression speed by about 1%. 
+ */ + // szptr = zptr; + + szptr = new short[2 * n]; + } + + private void GenerateMTFValues() + { + char[] yy = new char[256]; + int i, j; + char tmp; + char tmp2; + int zPend; + int wr; + int EOB; + + MakeMaps(); + EOB = nInUse + 1; + + for (i = 0; i <= EOB; i++) + { + mtfFreq[i] = 0; + } + + wr = 0; + zPend = 0; + for (i = 0; i < nInUse; i++) + { + yy[i] = (char)i; + } + + for (i = 0; i <= last; i++) + { + char ll_i; + + ll_i = unseqToSeq[block[zptr[i]]]; + + j = 0; + tmp = yy[j]; + while (ll_i != tmp) + { + j++; + tmp2 = tmp; + tmp = yy[j]; + yy[j] = tmp2; + } + ; + yy[0] = tmp; + + if (j == 0) + { + zPend++; + } + else + { + if (zPend > 0) + { + zPend--; + while (true) + { + switch (zPend % 2) + { + case 0: + szptr[wr] = BZip2Constants.RUNA; + wr++; + mtfFreq[BZip2Constants.RUNA]++; + break; + case 1: + szptr[wr] = BZip2Constants.RUNB; + wr++; + mtfFreq[BZip2Constants.RUNB]++; + break; + } + ; + if (zPend < 2) + { + break; + } + zPend = (zPend - 2) / 2; + } + ; + zPend = 0; + } + szptr[wr] = (short)(j + 1); + wr++; + mtfFreq[j + 1]++; + } + } + + if (zPend > 0) + { + zPend--; + while (true) + { + switch (zPend % 2) + { + case 0: + szptr[wr] = BZip2Constants.RUNA; + wr++; + mtfFreq[BZip2Constants.RUNA]++; + break; + case 1: + szptr[wr] = BZip2Constants.RUNB; + wr++; + mtfFreq[BZip2Constants.RUNB]++; + break; + } + if (zPend < 2) + { + break; + } + zPend = (zPend - 2) / 2; + } + } + + szptr[wr] = (short)EOB; + wr++; + mtfFreq[EOB]++; + + nMTF = wr; + } + + public override int Read(byte[] buffer, int offset, int count) + { + return 0; + } + + public override int ReadByte() + { + return -1; + } + + public override long Seek(long offset, SeekOrigin origin) + { + return 0; + } + + public override void SetLength(long value) + { + } + + public override void Write(byte[] buffer, int offset, int count) + { + for (int k = 0; k < count; ++k) + { + WriteByte(buffer[k + offset]); + } + } + + public override bool CanRead => false; + + public override bool CanSeek => false; + 
+ public override bool CanWrite => true; + + public override long Length => 0; + + public override long Position { get { return 0; } set { } } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CRC.cs b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CRC.cs new file mode 100644 index 0000000000..7ac42130f2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/BZip2/CRC.cs @@ -0,0 +1,203 @@ +/* + * Copyright 2001,2004-2005 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This package is based on the work done by Keiron Liddle), Aftex Software + * to whom the Ant project is very grateful for his + * great code. + */ + +namespace SharpCompress.Compressors.BZip2 +{ + /** + * A simple class the hold and calculate the CRC for sanity checking + * of the data. 
+ * + * @author Keiron Liddle + */ + + internal class CRC + { + public static int[] crc32Table = + { + 0x00000000, 0x04c11db7, 0x09823b6e, + 0x0d4326d9, + 0x130476dc, 0x17c56b6b, 0x1a864db2, + 0x1e475005, + 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, + 0x2b4bcb61, + 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, + 0x384fbdbd, + 0x4c11db70, 0x48d0c6c7, 0x4593e01e, + 0x4152fda9, + 0x5f15adac, 0x5bd4b01b, 0x569796c2, + 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, + 0x675a1011, + 0x791d4014, 0x7ddc5da3, 0x709f7b7a, + 0x745e66cd, + unchecked((int)0x9823b6e0), unchecked((int)0x9ce2ab57), unchecked((int)0x91a18d8e), + unchecked((int)0x95609039), + unchecked((int)0x8b27c03c), unchecked((int)0x8fe6dd8b), unchecked((int)0x82a5fb52), + unchecked((int)0x8664e6e5), + unchecked((int)0xbe2b5b58), unchecked((int)0xbaea46ef), unchecked((int)0xb7a96036), + unchecked((int)0xb3687d81), + unchecked((int)0xad2f2d84), unchecked((int)0xa9ee3033), unchecked((int)0xa4ad16ea), + unchecked((int)0xa06c0b5d), + unchecked((int)0xd4326d90), unchecked((int)0xd0f37027), unchecked((int)0xddb056fe), + unchecked((int)0xd9714b49), + unchecked((int)0xc7361b4c), unchecked((int)0xc3f706fb), unchecked((int)0xceb42022), + unchecked((int)0xca753d95), + unchecked((int)0xf23a8028), unchecked((int)0xf6fb9d9f), unchecked((int)0xfbb8bb46), + unchecked((int)0xff79a6f1), + unchecked((int)0xe13ef6f4), unchecked((int)0xe5ffeb43), unchecked((int)0xe8bccd9a), + unchecked((int)0xec7dd02d), + 0x34867077, 0x30476dc0, 0x3d044b19, + 0x39c556ae, + 0x278206ab, 0x23431b1c, 0x2e003dc5, + 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, + 0x1fcdbb16, + 0x018aeb13, 0x054bf6a4, 0x0808d07d, + 0x0cc9cdca, + 0x7897ab07, 0x7c56b6b0, 0x71159069, + 0x75d48dde, + 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, + 0x66d0fb02, + 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, + 0x53dc6066, + 0x4d9b3063, 0x495a2dd4, 0x44190b0d, + 0x40d816ba, + unchecked((int)0xaca5c697), unchecked((int)0xa864db20), unchecked((int)0xa527fdf9), + unchecked((int)0xa1e6e04e), + 
unchecked((int)0xbfa1b04b), unchecked((int)0xbb60adfc), unchecked((int)0xb6238b25), + unchecked((int)0xb2e29692), + unchecked((int)0x8aad2b2f), unchecked((int)0x8e6c3698), unchecked((int)0x832f1041), + unchecked((int)0x87ee0df6), + unchecked((int)0x99a95df3), unchecked((int)0x9d684044), unchecked((int)0x902b669d), + unchecked((int)0x94ea7b2a), + unchecked((int)0xe0b41de7), unchecked((int)0xe4750050), unchecked((int)0xe9362689), + unchecked((int)0xedf73b3e), + unchecked((int)0xf3b06b3b), unchecked((int)0xf771768c), unchecked((int)0xfa325055), + unchecked((int)0xfef34de2), + unchecked((int)0xc6bcf05f), unchecked((int)0xc27dede8), unchecked((int)0xcf3ecb31), + unchecked((int)0xcbffd686), + unchecked((int)0xd5b88683), unchecked((int)0xd1799b34), unchecked((int)0xdc3abded), + unchecked((int)0xd8fba05a), + 0x690ce0ee, 0x6dcdfd59, 0x608edb80, + 0x644fc637, + 0x7a089632, 0x7ec98b85, 0x738aad5c, + 0x774bb0eb, + 0x4f040d56, 0x4bc510e1, 0x46863638, + 0x42472b8f, + 0x5c007b8a, 0x58c1663d, 0x558240e4, + 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, + 0x285e1d47, + 0x36194d42, 0x32d850f5, 0x3f9b762c, + 0x3b5a6b9b, + 0x0315d626, 0x07d4cb91, 0x0a97ed48, + 0x0e56f0ff, + 0x1011a0fa, 0x14d0bd4d, 0x19939b94, + 0x1d528623, + unchecked((int)0xf12f560e), unchecked((int)0xf5ee4bb9), unchecked((int)0xf8ad6d60), + unchecked((int)0xfc6c70d7), + unchecked((int)0xe22b20d2), unchecked((int)0xe6ea3d65), unchecked((int)0xeba91bbc), + unchecked((int)0xef68060b), + unchecked((int)0xd727bbb6), unchecked((int)0xd3e6a601), unchecked((int)0xdea580d8), + unchecked((int)0xda649d6f), + unchecked((int)0xc423cd6a), unchecked((int)0xc0e2d0dd), unchecked((int)0xcda1f604), + unchecked((int)0xc960ebb3), + unchecked((int)0xbd3e8d7e), unchecked((int)0xb9ff90c9), unchecked((int)0xb4bcb610), + unchecked((int)0xb07daba7), + unchecked((int)0xae3afba2), unchecked((int)0xaafbe615), unchecked((int)0xa7b8c0cc), + unchecked((int)0xa379dd7b), + unchecked((int)0x9b3660c6), unchecked((int)0x9ff77d71), 
unchecked((int)0x92b45ba8), + unchecked((int)0x9675461f), + unchecked((int)0x8832161a), unchecked((int)0x8cf30bad), unchecked((int)0x81b02d74), + unchecked((int)0x857130c3), + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, + 0x50c9b640, + 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, + 0x43cdc09c, + 0x7b827d21, 0x7f436096, 0x7200464f, + 0x76c15bf8, + 0x68860bfd, 0x6c47164a, 0x61043093, + 0x65c52d24, + 0x119b4be9, 0x155a565e, 0x18197087, + 0x1cd86d30, + 0x029f3d35, 0x065e2082, 0x0b1d065b, + 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, + 0x3ad08088, + 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, + 0x29d4f654, + unchecked((int)0xc5a92679), unchecked((int)0xc1683bce), unchecked((int)0xcc2b1d17), + unchecked((int)0xc8ea00a0), + unchecked((int)0xd6ad50a5), unchecked((int)0xd26c4d12), unchecked((int)0xdf2f6bcb), + unchecked((int)0xdbee767c), + unchecked((int)0xe3a1cbc1), unchecked((int)0xe760d676), unchecked((int)0xea23f0af), + unchecked((int)0xeee2ed18), + unchecked((int)0xf0a5bd1d), unchecked((int)0xf464a0aa), unchecked((int)0xf9278673), + unchecked((int)0xfde69bc4), + unchecked((int)0x89b8fd09), unchecked((int)0x8d79e0be), unchecked((int)0x803ac667), + unchecked((int)0x84fbdbd0), + unchecked((int)0x9abc8bd5), unchecked((int)0x9e7d9662), unchecked((int)0x933eb0bb), + unchecked((int)0x97ffad0c), + unchecked((int)0xafb010b1), unchecked((int)0xab710d06), unchecked((int)0xa6322bdf), + unchecked((int)0xa2f33668), + unchecked((int)0xbcb4666d), unchecked((int)0xb8757bda), unchecked((int)0xb5365d03), + unchecked((int)0xb1f740b4) + }; + + public CRC() + { + InitialiseCRC(); + } + + internal void InitialiseCRC() + { + globalCrc = unchecked((int)0xffffffff); + } + + internal int GetFinalCRC() + { + return ~globalCrc; + } + + internal int GetGlobalCRC() + { + return globalCrc; + } + + internal void SetGlobalCRC(int newCrc) + { + globalCrc = newCrc; + } + + internal void UpdateCRC(int inCh) + { + int temp = (globalCrc >> 24) ^ inCh; + if (temp < 0) + { + temp = 256 + temp; + } + globalCrc = (globalCrc << 8) 
^ crc32Table[temp]; + } + + internal int globalCrc; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/CompressionMode.cs b/BizHawk.Client.Common/SharpCompress/Compressors/CompressionMode.cs new file mode 100644 index 0000000000..0d288bc5e3 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/CompressionMode.cs @@ -0,0 +1,8 @@ +namespace SharpCompress.Compressors +{ + public enum CompressionMode + { + Compress = 0, + Decompress = 1 + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/CRC32.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/CRC32.cs new file mode 100644 index 0000000000..4c38b9fc5b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/CRC32.cs @@ -0,0 +1,293 @@ +// Crc32.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2006-2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2010-January-16 13:16:27> +// +// ------------------------------------------------------------------ +// +// Implements the CRC algorithm, which is used in zip files. The zip format calls for +// the zipfile to contain a CRC for the unencrypted byte stream of each file. +// +// It is based on example source code published at +// http://www.vbaccelerator.com/home/net/code/libraries/CRC32/Crc32_zip_CRC32_CRC32_cs.asp +// +// This implementation adds a tweak of that code for use within zip creation. 
// CRC32.cs
// Derived from DotNetZip (Copyright (c) 2006-2009 Dino Chiesa and Microsoft
// Corporation), licensed under the Microsoft Public License.
// Computes the ZIP/gzip/zlib CRC-32 over a byte stream, optionally copying
// the bytes to an output stream in the same pass so the input only needs to
// be read once.

using System;
using System.IO;

namespace SharpCompress.Compressors.Deflate
{
    /// <summary>
    /// Calculates a 32-bit Cyclic Redundancy Checksum (CRC) using the same
    /// polynomial used by Zip. Used internally by the compression code; not
    /// intended for direct use by applications.
    /// </summary>
    internal class CRC32
    {
        private const int BUFFER_SIZE = 8192;

        // Table of per-byte CRC steps for the reflected polynomial, built once.
        private static readonly uint[] s_crcTable;

        // Running CRC register; starts at all-ones and is stored inverted,
        // so the reportable CRC is the bitwise complement of this value.
        private uint _register = 0xFFFFFFFF;

        static CRC32()
        {
            // PKZip specifies CRC-32 with the reflected polynomial 0xEDB88320
            // (often shown reversed as 0x04C11DB7).  The same polynomial is
            // used by Ethernet, FDDI, bzip2, gzip, and zlib.
            const uint polynomial = 0xEDB88320;

            s_crcTable = new uint[256];
            for (uint index = 0; index < 256; index++)
            {
                uint entry = index;
                for (int bit = 0; bit < 8; bit++)
                {
                    entry = ((entry & 1) != 0)
                        ? (entry >> 1) ^ polynomial
                        : entry >> 1;
                }
                s_crcTable[index] = entry;
            }
        }

        /// <summary>
        /// Total number of bytes read through the CRC so far.  Used when
        /// writing the ZipDirEntry while compressing files.
        /// </summary>
        public Int64 TotalBytesRead { get; private set; }

        /// <summary>
        /// The current CRC over all bytes slurped in so far.
        /// </summary>
        public Int32 Crc32Result => unchecked((Int32)~_register);

        /// <summary>
        /// Returns the CRC32 for the specified stream.
        /// </summary>
        /// <param name="input">The stream over which to calculate the CRC32.</param>
        /// <returns>The CRC32 calculation.</returns>
        public UInt32 GetCrc32(Stream input) => GetCrc32AndCopy(input, null);

        /// <summary>
        /// Returns the CRC32 for the specified stream, and writes the input
        /// into the output stream as it is read (single-pass CRC + copy).
        /// </summary>
        /// <param name="input">The stream over which to calculate the CRC32.</param>
        /// <param name="output">The stream into which to copy the input; may be null.</param>
        /// <returns>The CRC32 calculation.</returns>
        public UInt32 GetCrc32AndCopy(Stream input, Stream output)
        {
            if (input == null)
            {
                throw new ZlibException("The input stream must not be null.");
            }

            var chunk = new byte[BUFFER_SIZE];
            TotalBytesRead = 0;

            // Prime the loop with the first read; each iteration folds the
            // previous read into the CRC, then fetches the next chunk.
            int bytesRead = input.Read(chunk, 0, chunk.Length);
            output?.Write(chunk, 0, bytesRead);
            TotalBytesRead += bytesRead;

            while (bytesRead > 0)
            {
                SlurpBlock(chunk, 0, bytesRead);
                bytesRead = input.Read(chunk, 0, chunk.Length);
                output?.Write(chunk, 0, bytesRead);
                TotalBytesRead += bytesRead;
            }

            return ~_register;
        }

        /// <summary>
        /// Gets the CRC32 for the given (word, byte) combination, as defined
        /// by PKZip.
        /// </summary>
        /// <param name="W">The word to start with.</param>
        /// <param name="B">The byte to combine with it.</param>
        /// <returns>The CRC-ized result.</returns>
        public Int32 ComputeCrc32(Int32 W, byte B) => _InternalComputeCrc32((UInt32)W, B);

        internal Int32 _InternalComputeCrc32(UInt32 W, byte B)
            => (Int32)(s_crcTable[(W ^ B) & 0xFF] ^ (W >> 8));

        /// <summary>
        /// Updates the running CRC32 with the given block of bytes.  Useful
        /// when feeding the CRC from a Stream.
        /// </summary>
        /// <param name="block">Block of bytes to slurp.</param>
        /// <param name="offset">Starting point in the block.</param>
        /// <param name="count">How many bytes within the block to slurp.</param>
        public void SlurpBlock(byte[] block, int offset, int count)
        {
            if (block == null)
            {
                throw new ZlibException("The data buffer must not be null.");
            }

            int end = offset + count;
            for (int i = offset; i < end; i++)
            {
                _register = (_register >> 8) ^ s_crcTable[block[i] ^ (_register & 0xFF)];
            }
            TotalBytesRead += count;
        }

        // Multiply the GF(2) matrix by the vector: XOR together the matrix
        // rows selected by the set bits of the vector.
        private uint gf2_matrix_times(uint[] matrix, uint vec)
        {
            uint sum = 0;
            for (int row = 0; vec != 0; row++, vec >>= 1)
            {
                if ((vec & 1) != 0)
                {
                    sum ^= matrix[row];
                }
            }
            return sum;
        }

        // square = mat * mat in GF(2): doubles the number of zero bits the
        // operator applies.
        private void gf2_matrix_square(uint[] square, uint[] mat)
        {
            for (int i = 0; i < 32; i++)
            {
                square[i] = gf2_matrix_times(mat, mat[i]);
            }
        }

        /// <summary>
        /// Combines the given CRC32 value with the current running total.
        /// </summary>
        /// <remarks>
        /// Useful for a divide-and-conquer CRC: multiple workers can each
        /// compute a CRC32 over a segment of the data and the individual
        /// values are combined at the end (zlib's crc32_combine algorithm).
        /// </remarks>
        /// <param name="crc">The CRC value to be combined with this one.</param>
        /// <param name="length">The length of data the CRC value was calculated on.</param>
        public void Combine(int crc, int length)
        {
            var evenOp = new uint[32]; // even-power-of-two zeros operator
            var oddOp = new uint[32];  // odd-power-of-two zeros operator

            if (length == 0)
            {
                return;
            }

            uint combined = ~_register;
            var other = (uint)crc;

            // Operator for one zero bit in oddOp: the CRC-32 polynomial
            // followed by the shift matrix.
            oddOp[0] = 0xEDB88320;
            uint bit = 1;
            for (int i = 1; i < 32; i++)
            {
                oddOp[i] = bit;
                bit <<= 1;
            }

            // Operator for two zero bits in evenOp.
            gf2_matrix_square(evenOp, oddOp);

            // Operator for four zero bits in oddOp.
            gf2_matrix_square(oddOp, evenOp);

            var remaining = (uint)length;

            // Apply `remaining` zero bytes to `combined` (the first square
            // below puts the operator for one zero byte, eight zero bits,
            // into evenOp), alternating the two operator buffers.
            do
            {
                gf2_matrix_square(evenOp, oddOp);

                if ((remaining & 1) == 1)
                {
                    combined = gf2_matrix_times(evenOp, combined);
                }
                remaining >>= 1;

                if (remaining == 0)
                {
                    break;
                }

                // Another round with oddOp and evenOp swapped.
                gf2_matrix_square(oddOp, evenOp);
                if ((remaining & 1) == 1)
                {
                    combined = gf2_matrix_times(oddOp, combined);
                }
                remaining >>= 1;
            }
            while (remaining != 0);

            combined ^= other;

            _register = ~combined;
        }
    }
}
+// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-October-28 13:44:59> +// +// ------------------------------------------------------------------ +// +// This module defines logic for handling the Deflate or compression. +// +// This code is based on multiple sources: +// - the original zlib v1.2.3 source, which is Copyright (C) 1995-2005 Jean-loup Gailly. +// - the original jzlib, which is Copyright (c) 2000-2003 ymnk, JCraft,Inc. +// +// However, this code is significantly different from both. +// The object model is not the same, and many of the behaviors are different. +// +// In keeping with the license for these other works, the copyrights for +// jzlib and zlib are here. +// +// ----------------------------------------------------------------------- +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. 
// -----------------------------------------------------------------------
//
// DeflateManager: state and constants for the DEFLATE compressor, ported
// from zlib 1.1.3 (Jean-loup Gailly, Mark Adler) via jzlib and DotNetZip.
// This region declares the compressor's Huffman/LZ77 constants, the
// per-level tuning table, and the working state shared by the deflate
// routines defined later in this class.
//
// -----------------------------------------------------------------------

using System;

namespace SharpCompress.Compressors.Deflate
{
    internal sealed partial class DeflateManager
    {
        // extra bits for each length code
        internal static readonly int[] ExtraLengthBits =
        {
            0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
            3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0
        };

        // extra bits for each distance code
        internal static readonly int[] ExtraDistanceBits =
        {
            0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
            7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
        };

        // Result of one call to a block-compression function.
        internal enum BlockState
        {
            NeedMore = 0, // block not completed, need more input or more output
            BlockDone, // block flush performed
            FinishStarted, // finish started, need only more output at next deflate
            FinishDone // finish done, accept no more input or output
        }

        // Which compression strategy a Config row selects.
        internal enum DeflateFlavor
        {
            Store,
            Fast,
            Slow
        }

        private const int MEM_LEVEL_MAX = 9;
        private const int MEM_LEVEL_DEFAULT = 8;

        // Signature shared by DeflateNone / DeflateFast / DeflateSlow.
        internal delegate BlockState CompressFunc(FlushType flush);

        // Per-compression-level tuning parameters (zlib's configuration_table).
        internal class Config
        {
            // Use a faster search when the previous match is longer than this
            internal int GoodLength; // reduce lazy search above this match length

            // Attempt to find a better match only when the current match is
            // strictly smaller than this value. This mechanism is used only for
            // compression levels >= 4. For levels 1,2,3: MaxLazy is actually
            // MaxInsertLength. (See DeflateFast)

            internal int MaxLazy; // do not perform lazy search above this match length

            internal int NiceLength; // quit search above this match length

            // To speed up deflation, hash chains are never searched beyond this
            // length. A higher limit improves compression ratio but degrades the speed.

            internal int MaxChainLength;

            internal DeflateFlavor Flavor;

            private Config(int goodLength, int maxLazy, int niceLength, int maxChainLength, DeflateFlavor flavor)
            {
                GoodLength = goodLength;
                MaxLazy = maxLazy;
                NiceLength = niceLength;
                MaxChainLength = maxChainLength;
                Flavor = flavor;
            }

            // Maps CompressionLevel (0..9) to its tuning row.
            public static Config Lookup(CompressionLevel level)
            {
                return Table[(int)level];
            }

            static Config()
            {
                // Rows are indexed by compression level 0 (store) .. 9 (best).
                Table = new[]
                {
                    new Config(0, 0, 0, 0, DeflateFlavor.Store),
                    new Config(4, 4, 8, 4, DeflateFlavor.Fast),
                    new Config(4, 5, 16, 8, DeflateFlavor.Fast),
                    new Config(4, 6, 32, 32, DeflateFlavor.Fast),
                    new Config(4, 4, 16, 16, DeflateFlavor.Slow),
                    new Config(8, 16, 32, 32, DeflateFlavor.Slow),
                    new Config(8, 16, 128, 128, DeflateFlavor.Slow),
                    new Config(8, 32, 128, 256, DeflateFlavor.Slow),
                    new Config(32, 128, 258, 1024, DeflateFlavor.Slow),
                    new Config(32, 258, 258, 4096, DeflateFlavor.Slow)
                };
            }

            private static readonly Config[] Table;
        }

        // The block-compression routine selected for the current level.
        private CompressFunc DeflateFunction;

        // Error strings indexed by (Z_NEED_DICT - error code), zlib-style.
        private static readonly String[] _ErrorMessage =
        {
            "need dictionary",
            "stream end",
            "",
            "file error",
            "stream error",
            "data error",
            "insufficient memory",
            "buffer error",
            "incompatible version",
            ""
        };

        // preset dictionary flag in zlib header
        private const int PRESET_DICT = 0x20;

        private const int INIT_STATE = 42;
        private const int BUSY_STATE = 113;
        private const int FINISH_STATE = 666;

        // The deflate compression method
        private const int Z_DEFLATED = 8;

        private const int STORED_BLOCK = 0;
        private const int STATIC_TREES = 1;
        private const int DYN_TREES = 2;

        // The three kinds of block type
        private const int Z_BINARY = 0;
        private const int Z_ASCII = 1;
        private const int Z_UNKNOWN = 2;

        // Size of the bit buffer bi_buf, in bits.
        private const int Buf_size = 8 * 2;

        private const int MIN_MATCH = 3;
        private const int MAX_MATCH = 258;

        private const int MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);

        private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);

        private const int END_BLOCK = 256;

        internal ZlibCodec _codec; // the zlib encoder/decoder
        internal int status; // one of INIT_STATE / BUSY_STATE / FINISH_STATE
        internal byte[] pending; // output still pending - waiting to be compressed
        internal int nextPending; // index of next pending byte to output to the stream
        internal int pendingCount; // number of bytes in the pending buffer

        internal sbyte data_type; // UNKNOWN, BINARY or ASCII
        internal int last_flush; // value of flush param for previous deflate call

        internal int w_size; // LZ77 window size (32K by default)
        internal int w_bits; // log2(w_size) (8..16)
        internal int w_mask; // w_size - 1

        internal byte[] window;

        // Sliding window. Input bytes are read into the second half of the window,
        // and move to the first half later to keep a dictionary of at least wSize
        // bytes. With this organization, matches are limited to a distance of
        // wSize-MAX_MATCH bytes, but this ensures that IO is always
        // performed with a length multiple of the block size.
        //
        // To do: use the user input buffer as sliding window.

        internal int window_size;

        // Actual size of window: 2*wSize, except when the user input buffer
        // is directly used as sliding window.

        internal short[] prev;

        // Link to older string with same hash index. To limit the size of this
        // array to 64K, this link is maintained only for the last 32K strings.
        // An index in this array is thus a window index modulo 32K.

        private short[] head; // Heads of the hash chains or NIL.

        private int ins_h; // hash index of string to be inserted
        private int hash_size; // number of elements in hash table
        private int hash_bits; // log2(hash_size)
        private int hash_mask; // hash_size-1

        // Number of bits by which ins_h must be shifted at each input
        // step. It must be such that after MIN_MATCH steps, the oldest
        // byte no longer takes part in the hash key, that is:
        // hash_shift * MIN_MATCH >= hash_bits
        private int hash_shift;

        // Window position at the beginning of the current output block. Gets
        // negative when the window is moved backwards.

        private int blockStart;

        private Config config; // tuning parameters for the current level
        private int match_length; // length of best match
        private int prev_match; // previous match
        private int match_available; // set if previous match exists
        private int strstart; // start of string to insert
        private int match_start; // start of matching string
        private int lookahead; // number of valid bytes ahead in window

        // Length of the best match at previous step. Matches not greater than this
        // are discarded. This is used in the lazy match evaluation.
        private int prev_length;

        // Insert new strings in the hash table only if the match length is not
        // greater than this length. This saves time but degrades compression.
        // max_insert_length is used only for compression levels <= 3.

        private CompressionLevel compressionLevel; // compression level (1..9)
        private CompressionStrategy compressionStrategy; // favor or force Huffman coding

        private readonly short[] dyn_ltree; // literal and length tree
        private readonly short[] dyn_dtree; // distance tree
        private readonly short[] bl_tree; // Huffman tree for bit lengths

        private readonly Tree treeLiterals = new Tree(); // desc for literal tree
        private readonly Tree treeDistances = new Tree(); // desc for distance tree
        private readonly Tree treeBitLengths = new Tree(); // desc for bit length tree

        // number of codes at each bit length for an optimal tree
        private readonly short[] bl_count = new short[InternalConstants.MAX_BITS + 1];

        // heap used to build the Huffman trees
        private readonly int[] heap = new int[2 * InternalConstants.L_CODES + 1];

        private int heap_len; // number of elements in the heap
        private int heap_max; // element of largest frequency

        // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
        // The same heap array is used to build all trees.

        // Depth of each subtree used as tie breaker for trees of equal frequency
        private readonly sbyte[] depth = new sbyte[2 * InternalConstants.L_CODES + 1];

        private int _lengthOffset; // index for literals or lengths

        // Size of match buffer for literals/lengths. There are 4 reasons for
        // limiting lit_bufsize to 64K:
        // - frequencies can be kept in 16 bit counters
        // - if compression is not successful for the first block, all input
        // data is still in the window so we can still emit a stored block even
        // when input comes from standard input. (This can also be done for
        // all blocks if lit_bufsize is not greater than 32K.)
        // - if compression is not successful for a file smaller than 64K, we can
        // even emit a stored file instead of a stored block (saving 5 bytes).
        // This is applicable only for zip (not gzip or zlib).
        // - creating new Huffman trees less frequently may not provide fast
        // adaptation to changes in the input data statistics. (Take for
        // example a binary file with poorly compressible code followed by
        // a highly compressible string table.) Smaller buffer sizes give
        // fast adaptation but have of course the overhead of transmitting
        // trees more frequently.

        internal int lit_bufsize;

        internal int last_lit; // running index in l_buf

        // Buffer for distances. To simplify the code, d_buf and l_buf have
        // the same number of elements. To use different lengths, an extra flag
        // array would be necessary.

        internal int _distanceOffset; // index into pending; points to distance data

        internal int opt_len; // bit length of current block with optimal trees
        internal int static_len; // bit length of current block with static trees
        internal int matches; // number of string matches in current block
        internal int last_eob_len; // bit length of EOB code for last block

        // Output buffer. bits are inserted starting at the bottom (least
        // significant bits).
        internal short bi_buf;

        // Number of valid bits in bi_buf. All bits above the last valid bit
        // are always zero.
        internal int bi_valid;

        internal DeflateManager()
        {
            dyn_ltree = new short[HEAP_SIZE * 2];
            dyn_dtree = new short[(2 * InternalConstants.D_CODES + 1) * 2]; // distance tree
            bl_tree = new short[(2 * InternalConstants.BL_CODES + 1) * 2]; // Huffman tree for bit lengths
        }

        // lm_init: reset the LZ77 matcher state for a new stream.
        private void _InitializeLazyMatch()
        {
            window_size = 2 * w_size;

            // clear the hash - workitem 9063
            Array.Clear(head, 0, hash_size);

            config = Config.Lookup(compressionLevel);
            SetDeflater();

            strstart = 0;
            blockStart = 0;
            lookahead = 0;
            match_length = prev_length = MIN_MATCH - 1;
            match_available = 0;
            ins_h = 0;
        }
// Initialize the tree data structures for a new zlib stream.
private void _InitializeTreeData()
{
    treeLiterals.dyn_tree = dyn_ltree;
    treeLiterals.staticTree = StaticTree.Literals;

    treeDistances.dyn_tree = dyn_dtree;
    treeDistances.staticTree = StaticTree.Distances;

    treeBitLengths.dyn_tree = bl_tree;
    treeBitLengths.staticTree = StaticTree.BitLengths;

    bi_buf = 0;
    bi_valid = 0;
    last_eob_len = 8; // enough lookahead for inflate

    // Initialize the first block of the first file:
    _InitializeBlocks();
}

// Reset the dynamic trees and block statistics for a fresh block.
internal void _InitializeBlocks()
{
    // Initialize the trees (frequency slots are the even indices).
    for (int i = 0; i < InternalConstants.L_CODES; i++)
    {
        dyn_ltree[i * 2] = 0;
    }
    for (int i = 0; i < InternalConstants.D_CODES; i++)
    {
        dyn_dtree[i * 2] = 0;
    }
    for (int i = 0; i < InternalConstants.BL_CODES; i++)
    {
        bl_tree[i * 2] = 0;
    }

    // Every block ends with an end-of-block code, so it has frequency 1.
    dyn_ltree[END_BLOCK * 2] = 1;
    opt_len = static_len = 0;
    last_lit = matches = 0;
}

// Restore the heap property by moving down the tree starting at node k,
// exchanging a node with the smallest of its two sons if necessary, stopping
// when the heap property is re-established (each father smaller than its
// two sons).
internal void pqdownheap(short[] tree, int k)
{
    int v = heap[k];
    int j = k << 1; // left son of k
    while (j <= heap_len)
    {
        // Set j to the smallest of the two sons:
        if (j < heap_len && IsSmaller(tree, heap[j + 1], heap[j], depth))
        {
            j++;
        }

        // Exit if v is smaller than both sons
        if (IsSmaller(tree, v, heap[j], depth))
        {
            break;
        }

        // Exchange v with the smallest son
        heap[k] = heap[j];
        k = j;

        // And continue down the tree, setting j to the left son of k
        j <<= 1;
    }
    heap[k] = v;
}

// Heap ordering: compare by frequency (even slot), break ties by subtree
// depth so trees of equal frequency build deterministically.
internal static bool IsSmaller(short[] tree, int n, int m, sbyte[] depth)
{
    short tn2 = tree[n * 2];
    short tm2 = tree[m * 2];
    return (tn2 < tm2 || (tn2 == tm2 && depth[n] <= depth[m]));
}
// Scan a literal or distance tree to determine the frequencies of the codes
// in the bit length tree.
internal void ScanTree(short[] tree, int maxCode)
{
    int n; // iterates over all tree elements
    int prevlen = -1; // last emitted length
    int curlen; // length of current code
    int nextlen = tree[0 * 2 + 1]; // length of next code
    int count = 0; // repeat count of the current code
    int max_count = 7; // max repeat count
    int min_count = 4; // min repeat count

    if (nextlen == 0)
    {
        max_count = 138;
        min_count = 3;
    }
    tree[(maxCode + 1) * 2 + 1] = 0x7fff; // sentinel guard so the final run terminates

    for (n = 0; n <= maxCode; n++)
    {
        curlen = nextlen;
        nextlen = tree[(n + 1) * 2 + 1];
        if (++count < max_count && curlen == nextlen)
        {
            continue;
        }
        if (count < min_count)
        {
            // Run too short for a repeat code: count each length individually.
            bl_tree[curlen * 2] = (short)(bl_tree[curlen * 2] + count);
        }
        else if (curlen != 0)
        {
            if (curlen != prevlen)
            {
                bl_tree[curlen * 2]++;
            }
            bl_tree[InternalConstants.REP_3_6 * 2]++;
        }
        else if (count <= 10)
        {
            bl_tree[InternalConstants.REPZ_3_10 * 2]++;
        }
        else
        {
            bl_tree[InternalConstants.REPZ_11_138 * 2]++;
        }
        count = 0;
        prevlen = curlen;
        if (nextlen == 0)
        {
            max_count = 138;
            min_count = 3;
        }
        else if (curlen == nextlen)
        {
            max_count = 6;
            min_count = 3;
        }
        else
        {
            max_count = 7;
            min_count = 4;
        }
    }
}

// Construct the Huffman tree for the bit lengths and return the index in
// bl_order of the last bit length code to send.
internal int BuildBlTree()
{
    int max_blindex; // index of last bit length code of non zero freq

    // Determine the bit length frequencies for literal and distance trees
    ScanTree(dyn_ltree, treeLiterals.max_code);
    ScanTree(dyn_dtree, treeDistances.max_code);

    // Build the bit length tree:
    treeBitLengths.build_tree(this);

    // opt_len now includes the length of the tree representations, except
    // the lengths of the bit lengths codes and the 5+5+4 bits for the counts.

    // Determine the number of bit length codes to send. The pkzip format
    // requires that at least 4 bit length codes be sent. (appnote.txt says
    // 3 but the actual value used is 4.)
    for (max_blindex = InternalConstants.BL_CODES - 1; max_blindex >= 3; max_blindex--)
    {
        if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] != 0)
        {
            break;
        }
    }

    // Update opt_len to include the bit length tree and counts
    opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;

    return max_blindex;
}

// Send the header for a block using dynamic Huffman trees: the counts, the
// lengths of the bit length codes, the literal tree and the distance tree.
// IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
internal void send_all_trees(int lcodes, int dcodes, int blcodes)
{
    int rank; // index in bl_order

    send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
    send_bits(dcodes - 1, 5);
    send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
    for (rank = 0; rank < blcodes; rank++)
    {
        send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
    }
    send_tree(dyn_ltree, lcodes - 1); // literal tree
    send_tree(dyn_dtree, dcodes - 1); // distance tree
}
// Send a literal or distance tree in compressed form, using the codes in
// bl_tree.  Mirrors ScanTree: the same run-length state machine, but this
// pass emits the repeat codes instead of counting their frequencies.
internal void send_tree(short[] tree, int max_code)
{
    int n; // iterates over all tree elements
    int prevlen = -1; // last emitted length
    int curlen; // length of current code
    int nextlen = tree[0 * 2 + 1]; // length of next code
    int count = 0; // repeat count of the current code
    int max_count = 7; // max repeat count
    int min_count = 4; // min repeat count

    if (nextlen == 0)
    {
        max_count = 138;
        min_count = 3;
    }

    for (n = 0; n <= max_code; n++)
    {
        curlen = nextlen;
        nextlen = tree[(n + 1) * 2 + 1];
        if (++count < max_count && curlen == nextlen)
        {
            continue;
        }
        if (count < min_count)
        {
            // Run too short for a repeat code: emit each length directly.
            do
            {
                send_code(curlen, bl_tree);
            }
            while (--count != 0);
        }
        else if (curlen != 0)
        {
            if (curlen != prevlen)
            {
                send_code(curlen, bl_tree);
                count--;
            }
            send_code(InternalConstants.REP_3_6, bl_tree);
            send_bits(count - 3, 2);
        }
        else if (count <= 10)
        {
            send_code(InternalConstants.REPZ_3_10, bl_tree);
            send_bits(count - 3, 3);
        }
        else
        {
            send_code(InternalConstants.REPZ_11_138, bl_tree);
            send_bits(count - 11, 7);
        }
        count = 0;
        prevlen = curlen;
        if (nextlen == 0)
        {
            max_count = 138;
            min_count = 3;
        }
        else if (curlen == nextlen)
        {
            max_count = 6;
            min_count = 3;
        }
        else
        {
            max_count = 7;
            min_count = 4;
        }
    }
}
// Output a block of bytes on the stream.
// IN assertion: there is enough room in pending_buf.
private void put_bytes(byte[] p, int start, int len)
{
    Array.Copy(p, start, pending, pendingCount, len);
    pendingCount += len;
}

#if NOTNEEDED
private void put_byte(byte c)
{
    pending[pendingCount++] = c;
}
internal void put_short(int b)
{
    unchecked
    {
        pending[pendingCount++] = (byte)b;
        pending[pendingCount++] = (byte)(b >> 8);
    }
}
internal void putShortMSB(int b)
{
    unchecked
    {
        pending[pendingCount++] = (byte)(b >> 8);
        pending[pendingCount++] = (byte)b;
    }
}
#endif

// Emit the Huffman code for symbol c from the given tree
// (even slot = code bits, odd slot = code length).
internal void send_code(int c, short[] tree)
{
    int c2 = c * 2;
    send_bits((tree[c2] & 0xffff), (tree[c2 + 1] & 0xffff));
}

#pragma warning disable 675 // workaround for Visual Studio 2015 compiler bug: https://github.com/dotnet/roslyn/issues/4027
// Append `length` low-order bits of `value` to the output bit buffer,
// flushing whole 16-bit words into `pending` as they fill up.
internal void send_bits(int value, int length)
{
    int len = length;
    unchecked
    {
        if (bi_valid > Buf_size - len)
        {
            // Value straddles the buffer boundary: flush the filled 16 bits
            // and keep the overflow bits for the next word.
            bi_buf |= (short)((value << bi_valid) & 0xffff);

            pending[pendingCount++] = (byte)bi_buf;
            pending[pendingCount++] = (byte)(bi_buf >> 8);

            bi_buf = (short)((uint)value >> (Buf_size - bi_valid));
            bi_valid += len - Buf_size;
        }
        else
        {
            bi_buf |= (short)((value << bi_valid) & 0xffff);
            bi_valid += len;
        }
    }
}
#pragma warning restore 675

// Send one empty static block to give enough lookahead for inflate.
// This takes 10 bits, of which 7 may remain in the bit buffer.
// The current inflate code requires 9 bits of lookahead. If the
// last two codes for the previous block (real code plus EOB) were coded
// on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
// the last real code. In this case we send two empty static blocks instead
// of one. (There are no problems if the previous block is stored or fixed.)
// To simplify the code, we assume the worst case of last real code encoded
// on one bit only.
internal void _tr_align()
{
    send_bits(STATIC_TREES << 1, 3);
    send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);

    bi_flush();

    // Of the 10 bits for the empty block, we have already sent
    // (10 - bi_valid) bits. The lookahead for the last real code (before
    // the EOB of the previous block) was thus at least one plus the length
    // of the EOB plus what we have just sent of the empty static block.
    if (1 + last_eob_len + 10 - bi_valid < 9)
    {
        send_bits(STATIC_TREES << 1, 3);
        send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
        bi_flush();
    }
    last_eob_len = 7;
}

// Save the match info and tally the frequency counts. Return true if
// the current block must be flushed.
internal bool _tr_tally(int dist, int lc)
{
    // Record the (distance, length/literal) pair in the pending buffer
    // overlay: distances big-endian at _distanceOffset, lengths at _lengthOffset.
    pending[_distanceOffset + last_lit * 2] = unchecked((byte)((uint)dist >> 8));
    pending[_distanceOffset + last_lit * 2 + 1] = unchecked((byte)dist);
    pending[_lengthOffset + last_lit] = unchecked((byte)lc);
    last_lit++;

    if (dist == 0)
    {
        // lc is the unmatched char
        dyn_ltree[lc * 2]++;
    }
    else
    {
        matches++;

        // Here, lc is the match length - MIN_MATCH
        dist--; // dist = match distance - 1
        dyn_ltree[(Tree.LengthCode[lc] + InternalConstants.LITERALS + 1) * 2]++;
        dyn_dtree[Tree.DistanceCode(dist) * 2]++;
    }

    if ((last_lit & 0x1fff) == 0 && (int)compressionLevel > 2)
    {
        // Compute an upper bound for the compressed length
        int out_length = last_lit << 3;
        int in_length = strstart - blockStart;
        int dcode;
        for (dcode = 0; dcode < InternalConstants.D_CODES; dcode++)
        {
            out_length =
                (int)(out_length + dyn_dtree[dcode * 2] * (5L + ExtraDistanceBits[dcode]));
        }
        out_length >>= 3;
        if ((matches < (last_lit / 2)) && out_length < in_length / 2)
        {
            return true;
        }
    }

    return (last_lit == lit_bufsize - 1) || (last_lit == lit_bufsize);

    // We avoid equality with lit_bufsize because of wraparound at 64K
    // on 16 bit machines and because stored blocks are restricted to
    // 64K-1 bytes.
}

// Send the block data compressed using the given Huffman trees
internal void send_compressed_block(short[] ltree, short[] dtree)
{
    int distance; // distance of matched string
    int lc; // match length or unmatched char (if dist == 0)
    int lx = 0; // running index in l_buf
    int code; // the code to send
    int extra; // number of extra bits to send

    if (last_lit != 0)
    {
        do
        {
            int ix = _distanceOffset + lx * 2;
            distance = ((pending[ix] << 8) & 0xff00) |
                       (pending[ix + 1] & 0xff);
            lc = (pending[_lengthOffset + lx]) & 0xff;
            lx++;

            if (distance == 0)
            {
                send_code(lc, ltree); // send a literal byte
            }
            else
            {
                // literal or match pair
                // Here, lc is the match length - MIN_MATCH
                code = Tree.LengthCode[lc];

                // send the length code
                send_code(code + InternalConstants.LITERALS + 1, ltree);
                extra = ExtraLengthBits[code];
                if (extra != 0)
                {
                    // send the extra length bits
                    lc -= Tree.LengthBase[code];
                    send_bits(lc, extra);
                }
                distance--; // dist is now the match distance - 1
                code = Tree.DistanceCode(distance);

                // send the distance code
                send_code(code, dtree);

                extra = ExtraDistanceBits[code];
                if (extra != 0)
                {
                    // send the extra distance bits
                    distance -= Tree.DistanceBase[code];
                    send_bits(distance, extra);
                }
            }

            // Check that the overlay between pending and d_buf+l_buf is ok:
        }
        while (lx < last_lit);
    }

    send_code(END_BLOCK, ltree);
    last_eob_len = ltree[END_BLOCK * 2 + 1];
}

// Set the data type to ASCII or BINARY, using a crude approximation:
// binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
// IN assertion: the fields freq of dyn_ltree are set and the total of all
// frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ internal void set_data_type() + { + int n = 0; + int ascii_freq = 0; + int bin_freq = 0; + while (n < 7) + { + bin_freq += dyn_ltree[n * 2]; + n++; + } + while (n < 128) + { + ascii_freq += dyn_ltree[n * 2]; + n++; + } + while (n < InternalConstants.LITERALS) + { + bin_freq += dyn_ltree[n * 2]; + n++; + } + data_type = (sbyte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII); + } + + // Flush the bit buffer, keeping at most 7 bits in it. + internal void bi_flush() + { + if (bi_valid == 16) + { + pending[pendingCount++] = (byte)bi_buf; + pending[pendingCount++] = (byte)(bi_buf >> 8); + bi_buf = 0; + bi_valid = 0; + } + else if (bi_valid >= 8) + { + //put_byte((byte)bi_buf); + pending[pendingCount++] = (byte)bi_buf; + bi_buf >>= 8; + bi_valid -= 8; + } + } + + // Flush the bit buffer and align the output on a byte boundary + internal void bi_windup() + { + if (bi_valid > 8) + { + pending[pendingCount++] = (byte)bi_buf; + pending[pendingCount++] = (byte)(bi_buf >> 8); + } + else if (bi_valid > 0) + { + //put_byte((byte)bi_buf); + pending[pendingCount++] = (byte)bi_buf; + } + bi_buf = 0; + bi_valid = 0; + } + + // Copy a stored block, storing first the length and its + // one's complement if requested. + internal void copy_block(int buf, int len, bool header) + { + bi_windup(); // align on byte boundary + last_eob_len = 8; // enough lookahead for inflate + + if (header) + { + unchecked + { + //put_short((short)len); + pending[pendingCount++] = (byte)len; + pending[pendingCount++] = (byte)(len >> 8); + + //put_short((short)~len); + pending[pendingCount++] = (byte)~len; + pending[pendingCount++] = (byte)(~len >> 8); + } + } + + put_bytes(window, buf, len); + } + + internal void flush_block_only(bool eof) + { + _tr_flush_block(blockStart >= 0 ? blockStart : -1, strstart - blockStart, eof); + blockStart = strstart; + _codec.flush_pending(); + } + + // Copy without compression as much as possible from the input stream, return + // the current block state. 
+ // This function does not insert new strings in the dictionary since + // uncompressible data is probably not useful. This function is used + // only for the level=0 compression option. + // NOTE: this function should be optimized to avoid extra copying from + // window to pending_buf. + internal BlockState DeflateNone(FlushType flush) + { + // Stored blocks are limited to 0xffff bytes, pending is limited + // to pending_buf_size, and each stored block has a 5 byte header: + + int max_block_size = 0xffff; + int max_start; + + if (max_block_size > pending.Length - 5) + { + max_block_size = pending.Length - 5; + } + + // Copy as much as possible from input to output: + while (true) + { + // Fill the window as much as possible: + if (lookahead <= 1) + { + _fillWindow(); + if (lookahead == 0 && flush == FlushType.None) + { + return BlockState.NeedMore; + } + if (lookahead == 0) + { + break; // flush the current block + } + } + + strstart += lookahead; + lookahead = 0; + + // Emit a stored block if pending will be full: + max_start = blockStart + max_block_size; + if (strstart == 0 || strstart >= max_start) + { + // strstart == 0 is possible when wraparound on 16-bit machine + lookahead = strstart - max_start; + strstart = max_start; + + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + { + return BlockState.NeedMore; + } + } + + // Flush if we may have to slide, otherwise block_start may become + // negative and the data will be gone: + if (strstart - blockStart >= w_size - MIN_LOOKAHEAD) + { + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + { + return BlockState.NeedMore; + } + } + } + + flush_block_only(flush == FlushType.Finish); + if (_codec.AvailableBytesOut == 0) + { + return (flush == FlushType.Finish) ? BlockState.FinishStarted : BlockState.NeedMore; + } + + return flush == FlushType.Finish ? 
BlockState.FinishDone : BlockState.BlockDone; + } + + // Send a stored block + internal void _tr_stored_block(int buf, int stored_len, bool eof) + { + send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type + copy_block(buf, stored_len, true); // with header + } + + // Determine the best encoding for the current block: dynamic trees, static + // trees or store, and output the encoded block to the zip file. + internal void _tr_flush_block(int buf, int stored_len, bool eof) + { + int opt_lenb, static_lenb; // opt_len and static_len in bytes + int max_blindex = 0; // index of last bit length code of non zero freq + + // Build the Huffman trees unless a stored block is forced + if (compressionLevel > 0) + { + // Check if the file is ascii or binary + if (data_type == Z_UNKNOWN) + { + set_data_type(); + } + + // Construct the literal and distance trees + treeLiterals.build_tree(this); + + treeDistances.build_tree(this); + + // At this point, opt_len and static_len are the total bit lengths of + // the compressed block data, excluding the tree representations. + + // Build the bit length tree for the above two trees, and get the index + // in bl_order of the last bit length code to send. + max_blindex = BuildBlTree(); + + // Determine the best encoding. Compute first the block length in bytes + opt_lenb = (opt_len + 3 + 7) >> 3; + static_lenb = (static_len + 3 + 7) >> 3; + + if (static_lenb <= opt_lenb) + { + opt_lenb = static_lenb; + } + } + else + { + opt_lenb = static_lenb = stored_len + 5; // force a stored block + } + + if (stored_len + 4 <= opt_lenb && buf != -1) + { + // 4: two words for the lengths + // The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + // Otherwise we can't have processed more than WSIZE input bytes since + // the last block flush, because compression would have been + // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + // transform a block into a stored block. 
+ _tr_stored_block(buf, stored_len, eof); + } + else if (static_lenb == opt_lenb) + { + send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3); + send_compressed_block(StaticTree.lengthAndLiteralsTreeCodes, StaticTree.distTreeCodes); + } + else + { + send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3); + send_all_trees(treeLiterals.max_code + 1, treeDistances.max_code + 1, max_blindex + 1); + send_compressed_block(dyn_ltree, dyn_dtree); + } + + // The above check is made mod 2^32, for files larger than 512 MB + // and uLong implemented on 32 bits. + + _InitializeBlocks(); + + if (eof) + { + bi_windup(); + } + } + + // Fill the window when the lookahead becomes insufficient. + // Updates strstart and lookahead. + // + // IN assertion: lookahead < MIN_LOOKAHEAD + // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + // At least one byte has been read, or avail_in == 0; reads are + // performed for at least two bytes (required for the zip translate_eol + // option -- not supported here). + private void _fillWindow() + { + int n, m; + int p; + int more; // Amount of free space at the end of the window. + + do + { + more = (window_size - lookahead - strstart); + + // Deal with !@#$% 64K limit: + if (more == 0 && strstart == 0 && lookahead == 0) + { + more = w_size; + } + else if (more == -1) + { + // Very unlikely, but possible on 16 bit machine if strstart == 0 + // and lookahead == 1 (input done one byte at time) + more--; + + // If the window is almost full and there is insufficient lookahead, + // move the upper half to the lower one to make room in the upper half. + } + else if (strstart >= w_size + w_size - MIN_LOOKAHEAD) + { + Array.Copy(window, w_size, window, 0, w_size); + match_start -= w_size; + strstart -= w_size; // we now have strstart >= MAX_DIST + blockStart -= w_size; + + // Slide the hash table (could be avoided with 32 bit values + // at the expense of memory usage). 
We slide even when level == 0 + // to keep the hash table consistent if we switch back to level > 0 + // later. (Using level 0 permanently is not an optimal usage of + // zlib, so we don't care about this pathological case.) + + n = hash_size; + p = n; + do + { + m = (head[--p] & 0xffff); + head[p] = (short)((m >= w_size) ? (m - w_size) : 0); + } + while (--n != 0); + + n = w_size; + p = n; + do + { + m = (prev[--p] & 0xffff); + prev[p] = (short)((m >= w_size) ? (m - w_size) : 0); + + // If n is not on any hash chain, prev[n] is garbage but + // its value will never be used. + } + while (--n != 0); + more += w_size; + } + + if (_codec.AvailableBytesIn == 0) + { + return; + } + + // If there was no sliding: + // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + // more == window_size - lookahead - strstart + // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + // => more >= window_size - 2*WSIZE + 2 + // In the BIG_MEM or MMAP case (not yet supported), + // window_size == input_size + MIN_LOOKAHEAD && + // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + // Otherwise, window_size == 2*WSIZE so more >= 2. + // If there was sliding, more >= WSIZE. So in all cases, more >= 2. + + n = _codec.read_buf(window, strstart + lookahead, more); + lookahead += n; + + // Initialize the hash value now that we have some input: + if (lookahead >= MIN_MATCH) + { + ins_h = window[strstart] & 0xff; + ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask; + } + + // If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + // but this is not important since only literal bytes will be emitted. + } + while (lookahead < MIN_LOOKAHEAD && _codec.AvailableBytesIn != 0); + } + + // Compress as much as possible from the input stream, return the current + // block state. 
+ // This function does not perform lazy evaluation of matches and inserts + // new strings in the dictionary only for unmatched strings or for short + // matches. It is used only for the fast compression options. + internal BlockState DeflateFast(FlushType flush) + { + // short hash_head = 0; // head of the hash chain + int hash_head = 0; // head of the hash chain + bool bflush; // set if current block must be flushed + + while (true) + { + // Make sure that we always have enough lookahead, except + // at the end of the input file. We need MAX_MATCH bytes + // for the next match, plus MIN_MATCH bytes to insert the + // string following the next match. + if (lookahead < MIN_LOOKAHEAD) + { + _fillWindow(); + if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None) + { + return BlockState.NeedMore; + } + if (lookahead == 0) + { + break; // flush the current block + } + } + + // Insert the string window[strstart .. strstart+2] in the + // dictionary, and set hash_head to the head of the hash chain: + if (lookahead >= MIN_MATCH) + { + ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + + // prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + } + + // Find the longest match, discarding those <= prev_length. + // At this point we have always match_length < MIN_MATCH + + if (hash_head != 0L && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) + { + // To simplify the code, we prevent matches with the string + // of window index 0 (in particular we have to avoid a match + // of the string with itself at the start of the input file). 
+ if (compressionStrategy != CompressionStrategy.HuffmanOnly) + { + match_length = longest_match(hash_head); + } + + // longest_match() sets match_start + } + if (match_length >= MIN_MATCH) + { + // check_match(strstart, match_start, match_length); + + bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH); + + lookahead -= match_length; + + // Insert new strings in the hash table only if the match length + // is not too large. This saves time but degrades compression. + if (match_length <= config.MaxLazy && lookahead >= MIN_MATCH) + { + match_length--; // string at strstart already in hash table + do + { + strstart++; + + ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + + // prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + + // strstart never exceeds WSIZE-MAX_MATCH, so there are + // always MIN_MATCH bytes ahead. + } + while (--match_length != 0); + strstart++; + } + else + { + strstart += match_length; + match_length = 0; + ins_h = window[strstart] & 0xff; + + ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask; + + // If lookahead < MIN_MATCH, ins_h is garbage, but it does not + // matter since it will be recomputed at next deflate call. + } + } + else + { + // No match, output a literal byte + + bflush = _tr_tally(0, window[strstart] & 0xff); + lookahead--; + strstart++; + } + if (bflush) + { + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + { + return BlockState.NeedMore; + } + } + } + + flush_block_only(flush == FlushType.Finish); + if (_codec.AvailableBytesOut == 0) + { + if (flush == FlushType.Finish) + { + return BlockState.FinishStarted; + } + return BlockState.NeedMore; + } + return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone; + } + + // Same as above, but achieves better compression. 
We use a lazy + // evaluation for matches: a match is finally adopted only if there is + // no better match at the next window position. + internal BlockState DeflateSlow(FlushType flush) + { + // short hash_head = 0; // head of hash chain + int hash_head = 0; // head of hash chain + bool bflush; // set if current block must be flushed + + // Process the input block. + while (true) + { + // Make sure that we always have enough lookahead, except + // at the end of the input file. We need MAX_MATCH bytes + // for the next match, plus MIN_MATCH bytes to insert the + // string following the next match. + + if (lookahead < MIN_LOOKAHEAD) + { + _fillWindow(); + if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None) + { + return BlockState.NeedMore; + } + + if (lookahead == 0) + { + break; // flush the current block + } + } + + // Insert the string window[strstart .. strstart+2] in the + // dictionary, and set hash_head to the head of the hash chain: + + if (lookahead >= MIN_MATCH) + { + ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask; + + // prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + } + + // Find the longest match, discarding those <= prev_length. + prev_length = match_length; + prev_match = match_start; + match_length = MIN_MATCH - 1; + + if (hash_head != 0 && prev_length < config.MaxLazy && + ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) + { + // To simplify the code, we prevent matches with the string + // of window index 0 (in particular we have to avoid a match + // of the string with itself at the start of the input file). 
+ + if (compressionStrategy != CompressionStrategy.HuffmanOnly) + { + match_length = longest_match(hash_head); + } + + // longest_match() sets match_start + + if (match_length <= 5 && (compressionStrategy == CompressionStrategy.Filtered || + (match_length == MIN_MATCH && strstart - match_start > 4096))) + { + // If prev_match is also MIN_MATCH, match_start is garbage + // but we will ignore the current match anyway. + match_length = MIN_MATCH - 1; + } + } + + // If there was a match at the previous step and the current + // match is not better, output the previous match: + if (prev_length >= MIN_MATCH && match_length <= prev_length) + { + int max_insert = strstart + lookahead - MIN_MATCH; + + // Do not insert strings in hash table beyond this. + + // check_match(strstart-1, prev_match, prev_length); + + bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH); + + // Insert in hash table all strings up to the end of the match. + // strstart-1 and strstart are already inserted. If there is not + // enough lookahead, the last two strings are not inserted in + // the hash table. + lookahead -= (prev_length - 1); + prev_length -= 2; + do + { + if (++strstart <= max_insert) + { + ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & + hash_mask; + + //prev[strstart&w_mask]=hash_head=head[ins_h]; + hash_head = (head[ins_h] & 0xffff); + prev[strstart & w_mask] = head[ins_h]; + head[ins_h] = unchecked((short)strstart); + } + } + while (--prev_length != 0); + match_available = 0; + match_length = MIN_MATCH - 1; + strstart++; + + if (bflush) + { + flush_block_only(false); + if (_codec.AvailableBytesOut == 0) + { + return BlockState.NeedMore; + } + } + } + else if (match_available != 0) + { + // If there was no match at the previous position, output a + // single literal. If there was a match but the current match + // is longer, truncate the previous match to a single literal. 
+ + bflush = _tr_tally(0, window[strstart - 1] & 0xff); + + if (bflush) + { + flush_block_only(false); + } + strstart++; + lookahead--; + if (_codec.AvailableBytesOut == 0) + { + return BlockState.NeedMore; + } + } + else + { + // There is no previous match to compare with, wait for + // the next step to decide. + + match_available = 1; + strstart++; + lookahead--; + } + } + + if (match_available != 0) + { + bflush = _tr_tally(0, window[strstart - 1] & 0xff); + match_available = 0; + } + flush_block_only(flush == FlushType.Finish); + + if (_codec.AvailableBytesOut == 0) + { + if (flush == FlushType.Finish) + { + return BlockState.FinishStarted; + } + return BlockState.NeedMore; + } + + return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone; + } + + internal int longest_match(int cur_match) + { + int chain_length = config.MaxChainLength; // max hash chain length + int scan = strstart; // current string + int match; // matched string + int len; // length of current match + int best_len = prev_length; // best match length so far + int limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0; + + int niceLength = config.NiceLength; + + // Stop when cur_match becomes <= limit. To simplify the code, + // we prevent matches with the string of window index 0. + + int wmask = w_mask; + + int strend = strstart + MAX_MATCH; + byte scan_end1 = window[scan + best_len - 1]; + byte scan_end = window[scan + best_len]; + + // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + // It is easy to get rid of this optimization if necessary. + + // Do not waste too much time if we already have a good match: + if (prev_length >= config.GoodLength) + { + chain_length >>= 2; + } + + // Do not look for matches beyond the end of the input. This is necessary + // to make deflate deterministic. 
+ if (niceLength > lookahead) + { + niceLength = lookahead; + } + + do + { + match = cur_match; + + // Skip to next match if the match length cannot increase + // or if the match length is less than 2: + if (window[match + best_len] != scan_end || + window[match + best_len - 1] != scan_end1 || + window[match] != window[scan] || + window[++match] != window[scan + 1]) + { + continue; + } + + // The check at best_len-1 can be removed because it will be made + // again later. (This heuristic is not always a win.) + // It is not necessary to compare scan[2] and match[2] since they + // are always equal when the other bytes match, given that + // the hash keys are equal and that HASH_BITS >= 8. + scan += 2; + match++; + + // We check for insufficient lookahead only every 8th comparison; + // the 256th check will be made at strstart+258. + do + { + } + while (window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && + window[++scan] == window[++match] && scan < strend); + + len = MAX_MATCH - (strend - scan); + scan = strend - MAX_MATCH; + + if (len > best_len) + { + match_start = cur_match; + best_len = len; + if (len >= niceLength) + { + break; + } + scan_end1 = window[scan + best_len - 1]; + scan_end = window[scan + best_len]; + } + } + while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length != 0); + + if (best_len <= lookahead) + { + return best_len; + } + return lookahead; + } + + private bool Rfc1950BytesEmitted; + + internal bool WantRfc1950HeaderBytes { get; set; } = true; + + internal int Initialize(ZlibCodec codec, CompressionLevel level) + { + return Initialize(codec, level, ZlibConstants.WindowBitsMax); + } + + internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits) + { + return Initialize(codec, level, 
bits, MEM_LEVEL_DEFAULT, CompressionStrategy.Default); + } + + internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits, + CompressionStrategy compressionStrategy) + { + return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, compressionStrategy); + } + + internal int Initialize(ZlibCodec codec, CompressionLevel level, int windowBits, int memLevel, + CompressionStrategy strategy) + { + _codec = codec; + _codec.Message = null; + + // validation + if (windowBits < 9 || windowBits > 15) + { + throw new ZlibException("windowBits must be in the range 9..15."); + } + + if (memLevel < 1 || memLevel > MEM_LEVEL_MAX) + { + throw new ZlibException(String.Format("memLevel must be in the range 1.. {0}", MEM_LEVEL_MAX)); + } + + _codec.dstate = this; + + w_bits = windowBits; + w_size = 1 << w_bits; + w_mask = w_size - 1; + + hash_bits = memLevel + 7; + hash_size = 1 << hash_bits; + hash_mask = hash_size - 1; + hash_shift = ((hash_bits + MIN_MATCH - 1) / MIN_MATCH); + + window = new byte[w_size * 2]; + prev = new short[w_size]; + head = new short[hash_size]; + + // for memLevel==8, this will be 16384, 16k + lit_bufsize = 1 << (memLevel + 6); + + // Use a single array as the buffer for data pending compression, + // the output distance codes, and the output length codes (aka tree). + // orig comment: This works just fine since the average + // output size for (length,distance) codes is <= 24 bits. + pending = new byte[lit_bufsize * 4]; + _distanceOffset = lit_bufsize; + _lengthOffset = (1 + 2) * lit_bufsize; + + // So, for memLevel 8, the length of the pending buffer is 65536. 64k. + // The first 16k are pending bytes. + // The middle slice, of 32k, is used for distance codes. + // The final 16k are length codes. 
+ + compressionLevel = level; + compressionStrategy = strategy; + + Reset(); + return ZlibConstants.Z_OK; + } + + internal void Reset() + { + _codec.TotalBytesIn = _codec.TotalBytesOut = 0; + _codec.Message = null; + + //strm.data_type = Z_UNKNOWN; + + pendingCount = 0; + nextPending = 0; + + Rfc1950BytesEmitted = false; + + status = (WantRfc1950HeaderBytes) ? INIT_STATE : BUSY_STATE; + _codec._Adler32 = Adler.Adler32(0, null, 0, 0); + + last_flush = (int)FlushType.None; + + _InitializeTreeData(); + _InitializeLazyMatch(); + } + + internal int End() + { + if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE) + { + return ZlibConstants.Z_STREAM_ERROR; + } + + // Deallocate in reverse order of allocations: + pending = null; + head = null; + prev = null; + window = null; + + // free + // dstate=null; + return status == BUSY_STATE ? ZlibConstants.Z_DATA_ERROR : ZlibConstants.Z_OK; + } + + private void SetDeflater() + { + switch (config.Flavor) + { + case DeflateFlavor.Store: + DeflateFunction = DeflateNone; + break; + case DeflateFlavor.Fast: + DeflateFunction = DeflateFast; + break; + case DeflateFlavor.Slow: + DeflateFunction = DeflateSlow; + break; + } + } + + internal int SetParams(CompressionLevel level, CompressionStrategy strategy) + { + int result = ZlibConstants.Z_OK; + + if (compressionLevel != level) + { + Config newConfig = Config.Lookup(level); + + // change in the deflate flavor (Fast vs slow vs none)? + if (newConfig.Flavor != config.Flavor && _codec.TotalBytesIn != 0) + { + // Flush the last buffer: + result = _codec.Deflate(FlushType.Partial); + } + + compressionLevel = level; + config = newConfig; + SetDeflater(); + } + + // no need to flush with change in strategy? Really? 
+ compressionStrategy = strategy;
+
+ return result;
+ }
+
+ // Preload the compression window with a caller-supplied dictionary,
+ // mirroring zlib's deflateSetDictionary(). Only legal before any data
+ // has been compressed (status == INIT_STATE). Returns Z_OK; the
+ // dictionary's adler32 is folded into the codec so inflate can verify it.
+ internal int SetDictionary(byte[] dictionary)
+ {
+ // BUGFIX: validate BEFORE dereferencing. The previous code read
+ // dictionary.Length first, so a null dictionary surfaced as a
+ // NullReferenceException instead of the intended ZlibException.
+ if (dictionary == null || status != INIT_STATE)
+ {
+ throw new ZlibException("Stream error.");
+ }
+
+ int length = dictionary.Length;
+ int index = 0;
+
+ _codec._Adler32 = Adler.Adler32(_codec._Adler32, dictionary, 0, dictionary.Length);
+
+ // Dictionaries shorter than the minimum match length cannot seed any match.
+ if (length < MIN_MATCH)
+ {
+ return ZlibConstants.Z_OK;
+ }
+ if (length > w_size - MIN_LOOKAHEAD)
+ {
+ length = w_size - MIN_LOOKAHEAD;
+ index = dictionary.Length - length; // use the tail of the dictionary
+ }
+ Array.Copy(dictionary, index, window, 0, length);
+ strstart = length;
+ blockStart = length;
+
+ // Insert all strings in the hash table (except for the last two bytes).
+ // s->lookahead stays null, so s->ins_h will be recomputed at the next
+ // call of fill_window.
+
+ ins_h = window[0] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;
+
+ for (int n = 0; n <= length - MIN_MATCH; n++)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ prev[n & w_mask] = head[ins_h];
+ head[ins_h] = (short)n;
+ }
+ return ZlibConstants.Z_OK;
+ }
+
+ internal int Deflate(FlushType flush)
+ {
+ int old_flush;
+
+ if (_codec.OutputBuffer == null ||
+ (_codec.InputBuffer == null && _codec.AvailableBytesIn != 0) ||
+ (status == FINISH_STATE && flush != FlushType.Finish))
+ {
+ _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_STREAM_ERROR)];
+ throw new ZlibException(String.Format("Something is fishy. 
[{0}]", _codec.Message)); + + //return ZlibConstants.Z_STREAM_ERROR; + } + if (_codec.AvailableBytesOut == 0) + { + _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)]; + throw new ZlibException("OutputBuffer is full (AvailableBytesOut == 0)"); + + //return ZlibConstants.Z_BUF_ERROR; + } + + old_flush = last_flush; + last_flush = (int)flush; + + // Write the zlib (rfc1950) header bytes + if (status == INIT_STATE) + { + int header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8; + int level_flags = (((int)compressionLevel - 1) & 0xff) >> 1; + + if (level_flags > 3) + { + level_flags = 3; + } + header |= (level_flags << 6); + if (strstart != 0) + { + header |= PRESET_DICT; + } + header += 31 - (header % 31); + + status = BUSY_STATE; + + //putShortMSB(header); + unchecked + { + pending[pendingCount++] = (byte)(header >> 8); + pending[pendingCount++] = (byte)header; + } + + // Save the adler32 of the preset dictionary: + if (strstart != 0) + { + ////putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16))); + //putShortMSB((int)((UInt64)_codec._Adler32 >> 16)); + //putShortMSB((int)(_codec._Adler32 & 0xffff)); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8); + pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF); + } + _codec._Adler32 = Adler.Adler32(0, null, 0, 0); + } + + // Flush as much pending output as possible + if (pendingCount != 0) + { + _codec.flush_pending(); + if (_codec.AvailableBytesOut == 0) + { + //System.out.println(" avail_out==0"); + // Since avail_out is 0, deflate will be called again with + // more output space, but possibly with both pending and + // avail_in equal to zero. 
There won't be anything to do, + // but this is not an error situation so make sure we + // return OK instead of BUF_ERROR at next call of deflate: + last_flush = -1; + return ZlibConstants.Z_OK; + } + + // Make sure there is something to do and avoid duplicate consecutive + // flushes. For repeated and useless calls with Z_FINISH, we keep + // returning Z_STREAM_END instead of Z_BUFF_ERROR. + } + else if (_codec.AvailableBytesIn == 0 && + (int)flush <= old_flush && + flush != FlushType.Finish) + { + // workitem 8557 + // Not sure why this needs to be an error. + // pendingCount == 0, which means there's nothing to deflate. + // And the caller has not asked for a FlushType.Finish, but... + // that seems very non-fatal. We can just say "OK" and do nthing. + + // _codec.Message = z_errmsg[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)]; + // throw new ZlibException("AvailableBytesIn == 0 && flush<=old_flush && flush != FlushType.Finish"); + + return ZlibConstants.Z_OK; + } + + // User must not provide more input after the first FINISH: + if (status == FINISH_STATE && _codec.AvailableBytesIn != 0) + { + _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)]; + throw new ZlibException("status == FINISH_STATE && _codec.AvailableBytesIn != 0"); + } + + // Start a new block or continue the current one. 
+ if (_codec.AvailableBytesIn != 0 || lookahead != 0 || (flush != FlushType.None && status != FINISH_STATE)) + { + BlockState bstate = DeflateFunction(flush); + + if (bstate == BlockState.FinishStarted || bstate == BlockState.FinishDone) + { + status = FINISH_STATE; + } + if (bstate == BlockState.NeedMore || bstate == BlockState.FinishStarted) + { + if (_codec.AvailableBytesOut == 0) + { + last_flush = -1; // avoid BUF_ERROR next call, see above + } + return ZlibConstants.Z_OK; + + // If flush != Z_NO_FLUSH && avail_out == 0, the next call + // of deflate should use the same flush parameter to make sure + // that the flush is complete. So we don't have to output an + // empty block here, this will be done at next call. This also + // ensures that for a very small output buffer, we emit at most + // one empty block. + } + + if (bstate == BlockState.BlockDone) + { + if (flush == FlushType.Partial) + { + _tr_align(); + } + else + { + // FlushType.Full or FlushType.Sync + _tr_stored_block(0, 0, false); + + // For a full flush, this empty block will be recognized + // as a special marker by inflate_sync(). 
+ if (flush == FlushType.Full) + { + // clear hash (forget the history) + for (int i = 0; i < hash_size; i++) + { + head[i] = 0; + } + } + } + _codec.flush_pending(); + if (_codec.AvailableBytesOut == 0) + { + last_flush = -1; // avoid BUF_ERROR at next call, see above + return ZlibConstants.Z_OK; + } + } + } + + if (flush != FlushType.Finish) + { + return ZlibConstants.Z_OK; + } + + if (!WantRfc1950HeaderBytes || Rfc1950BytesEmitted) + { + return ZlibConstants.Z_STREAM_END; + } + + // Write the zlib trailer (adler32) + pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16); + pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8); + pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF); + + //putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16))); + //putShortMSB((int)(_codec._Adler32 & 0xffff)); + + _codec.flush_pending(); + + // If avail_out is zero, the application will call deflate again + // to flush the rest. + + Rfc1950BytesEmitted = true; // write the trailer only once! + + return pendingCount != 0 ? ZlibConstants.Z_OK : ZlibConstants.Z_STREAM_END; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/DeflateStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/DeflateStream.cs new file mode 100644 index 0000000000..e095f46069 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/DeflateStream.cs @@ -0,0 +1,366 @@ +// DeflateStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009-2010 Dino Chiesa. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. 
+// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2010-February-05 08:49:04> +// +// ------------------------------------------------------------------ +// +// This module defines the DeflateStream class, which can be used as a replacement for +// the System.IO.Compression.DeflateStream class in the .NET BCL. +// +// ------------------------------------------------------------------ + +using System; +using System.IO; +using System.Text; + +namespace SharpCompress.Compressors.Deflate +{ + public class DeflateStream : Stream + { + private readonly ZlibBaseStream _baseStream; + private bool _disposed; + + public DeflateStream(Stream stream, CompressionMode mode, + CompressionLevel level = CompressionLevel.Default, + Encoding forceEncoding = null) + { + _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, forceEncoding); + } + + #region Zlib properties + + /// + /// This property sets the flush behavior on the stream. + /// + /// See the ZLIB documentation for the meaning of the flush behavior. + /// + public virtual FlushType FlushMode + { + get => (_baseStream._flushMode); + set + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + _baseStream._flushMode = value; + } + } + + /// + /// The size of the working buffer for the compression codec. + /// + /// + /// + /// + /// The working buffer is used for all stream operations. The default size is + /// 1024 bytes. The minimum size is 128 bytes. You may get better performance + /// with a larger buffer. Then again, you might not. You would have to test + /// it. + /// + /// + /// + /// Set this before the first call to Read() or Write() on the + /// stream. If you try to set it afterwards, it will throw. 
+ /// + /// + public int BufferSize + { + get => _baseStream._bufferSize; + set + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + if (_baseStream._workingBuffer != null) + { + throw new ZlibException("The working buffer is already set."); + } + if (value < ZlibConstants.WorkingBufferSizeMin) + { + throw new ZlibException( + String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, + ZlibConstants.WorkingBufferSizeMin)); + } + _baseStream._bufferSize = value; + } + } + + /// + /// The ZLIB strategy to be used during compression. + /// + /// + /// + /// By tweaking this parameter, you may be able to optimize the compression for + /// data with particular characteristics. + /// + public CompressionStrategy Strategy + { + get => _baseStream.Strategy; + set + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + _baseStream.Strategy = value; + } + } + + /// Returns the total number of bytes input so far. + public virtual long TotalIn => _baseStream._z.TotalBytesIn; + + /// Returns the total number of bytes output so far. + public virtual long TotalOut => _baseStream._z.TotalBytesOut; + + #endregion + + #region System.IO.Stream methods + + /// + /// Indicates whether the stream can be read. + /// + /// + /// The return value depends on whether the captive stream supports reading. + /// + public override bool CanRead + { + get + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + return _baseStream._stream.CanRead; + } + } + + /// + /// Indicates whether the stream supports Seek operations. + /// + /// + /// Always returns false. + /// + public override bool CanSeek => false; + + /// + /// Indicates whether the stream can be written. + /// + /// + /// The return value depends on whether the captive stream supports writing. 
+ /// + public override bool CanWrite + { + get + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + return _baseStream._stream.CanWrite; + } + } + + /// + /// Reading this property always throws a . + /// + public override long Length => throw new NotSupportedException(); + + /// + /// The position of the stream pointer. + /// + /// + /// + /// Setting this property always throws a . Reading will return the total bytes + /// written out, if used in writing, or the total bytes read in, if used in + /// reading. The count may refer to compressed bytes or uncompressed bytes, + /// depending on how you've used the stream. + /// + public override long Position + { + get + { + if (_baseStream._streamMode == ZlibBaseStream.StreamMode.Writer) + { + return _baseStream._z.TotalBytesOut; + } + if (_baseStream._streamMode == ZlibBaseStream.StreamMode.Reader) + { + return _baseStream._z.TotalBytesIn; + } + return 0; + } + set => throw new NotSupportedException(); + } + + /// + /// Dispose the stream. + /// + /// + /// This may or may not result in a Close() call on the captive stream. + /// + protected override void Dispose(bool disposing) + { + try + { + if (!_disposed) + { + if (disposing) + { + _baseStream?.Dispose(); + } + _disposed = true; + } + } + finally + { + base.Dispose(disposing); + } + } + + /// + /// Flush the stream. + /// + public override void Flush() + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + _baseStream.Flush(); + } + + /// + /// Read data from the stream. + /// + /// + /// + /// + /// If you wish to use the DeflateStream to compress data while + /// reading, you can create a DeflateStream with + /// CompressionMode.Compress, providing an uncompressed data stream. + /// Then call Read() on that DeflateStream, and the data read will be + /// compressed as you read. 
If you wish to use the DeflateStream to + /// decompress data while reading, you can create a DeflateStream with + /// CompressionMode.Decompress, providing a readable compressed data + /// stream. Then call Read() on that DeflateStream, and the data read + /// will be decompressed as you read. + /// + /// + /// + /// A DeflateStream can be used for Read() or Write(), but not both. + /// + /// + /// + /// The buffer into which the read data should be placed. + /// the offset within that data array to put the first byte read. + /// the number of bytes to read. + /// the number of bytes actually read + public override int Read(byte[] buffer, int offset, int count) + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + return _baseStream.Read(buffer, offset, count); + } + + public override int ReadByte() + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + return _baseStream.ReadByte(); + } + + /// + /// Calling this method always throws a . + /// + /// this is irrelevant, since it will always throw! + /// this is irrelevant, since it will always throw! + /// irrelevant! + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + /// + /// Calling this method always throws a . + /// + /// this is irrelevant, since it will always throw! + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + /// + /// Write data to the stream. + /// + /// + /// + /// + /// If you wish to use the DeflateStream to compress data while + /// writing, you can create a DeflateStream with + /// CompressionMode.Compress, and a writable output stream. Then call + /// Write() on that DeflateStream, providing uncompressed data + /// as input. The data sent to the output stream will be the compressed form + /// of the data written. 
If you wish to use the DeflateStream to + /// decompress data while writing, you can create a DeflateStream with + /// CompressionMode.Decompress, and a writable output stream. Then + /// call Write() on that stream, providing previously compressed + /// data. The data sent to the output stream will be the decompressed form of + /// the data written. + /// + /// + /// + /// A DeflateStream can be used for Read() or Write(), + /// but not both. + /// + /// + /// + /// + /// The buffer holding data to write to the stream. + /// the offset within that data array to find the first byte to write. + /// the number of bytes to write. + public override void Write(byte[] buffer, int offset, int count) + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + _baseStream.Write(buffer, offset, count); + } + + public override void WriteByte(byte value) + { + if (_disposed) + { + throw new ObjectDisposedException("DeflateStream"); + } + _baseStream.WriteByte(value); + } + + #endregion + + public MemoryStream InputBuffer => new MemoryStream(_baseStream._z.InputBuffer, _baseStream._z.NextIn, + _baseStream._z.AvailableBytesIn); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/FlushType.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/FlushType.cs new file mode 100644 index 0000000000..9a3cc77476 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/FlushType.cs @@ -0,0 +1,44 @@ +namespace SharpCompress.Compressors.Deflate +{ + /// + /// Describes how to flush the current deflate operation. + /// + /// + /// The different FlushType values are useful when using a Deflate in a streaming application. + /// + public enum FlushType + { + /// No flush at all. + None = 0, + + /// Closes the current block, but doesn't flush it to + /// the output. Used internally only in hypothetical + /// scenarios. 
This was supposed to be removed by Zlib, but it is + /// still in use in some edge cases. + /// + Partial, + + /// + /// Use this during compression to specify that all pending output should be + /// flushed to the output buffer and the output should be aligned on a byte + /// boundary. You might use this in a streaming communication scenario, so that + /// the decompressor can get all input data available so far. When using this + /// with a ZlibCodec, AvailableBytesIn will be zero after the call if + /// enough output space has been provided before the call. Flushing will + /// degrade compression and so it should be used only when necessary. + /// + Sync, + + /// + /// Use this during compression to specify that all output should be flushed, as + /// with FlushType.Sync, but also, the compression state should be reset + /// so that decompression can restart from this point if previous compressed + /// data has been damaged or if random access is desired. Using + /// FlushType.Full too often can significantly degrade the compression. + /// + Full, + + /// Signals the end of the compression/decompression stream. + Finish + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/GZipStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/GZipStream.cs new file mode 100644 index 0000000000..decbc8ad40 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/GZipStream.cs @@ -0,0 +1,479 @@ +// GZipStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. 
+// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2010-January-09 12:04:28> +// +// ------------------------------------------------------------------ +// +// This module defines the GZipStream class, which can be used as a replacement for +// the System.IO.Compression.GZipStream class in the .NET BCL. NB: The design is not +// completely OO clean: there is some intelligence in the ZlibBaseStream that reads the +// GZip header. +// +// ------------------------------------------------------------------ + +using System; +using System.IO; +using SharpCompress.Common; +using SharpCompress.Converters; +using System.Text; + +namespace SharpCompress.Compressors.Deflate +{ + public class GZipStream : Stream + { + internal static readonly DateTime UNIX_EPOCH = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); + + public DateTime? LastModified { get; set; } + + private string _comment; + private string _fileName; + + internal ZlibBaseStream BaseStream; + private bool _disposed; + private bool _firstReadDone; + private int _headerByteCount; + + private readonly Encoding _encoding; + + public GZipStream(Stream stream, CompressionMode mode) + : this(stream, mode, CompressionLevel.Default, Encoding.UTF8) + { + } + + public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level) + : this(stream, mode, level, Encoding.UTF8) + { + } + + public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, Encoding encoding) + { + BaseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, encoding); + _encoding = encoding; + } + + #region Zlib properties + + public virtual FlushType FlushMode + { + get => (BaseStream._flushMode); + set + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + BaseStream._flushMode = value; + } + } + + public int BufferSize + { + get => BaseStream._bufferSize; + 
set + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + if (BaseStream._workingBuffer != null) + { + throw new ZlibException("The working buffer is already set."); + } + if (value < ZlibConstants.WorkingBufferSizeMin) + { + throw new ZlibException( + String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, + ZlibConstants.WorkingBufferSizeMin)); + } + BaseStream._bufferSize = value; + } + } + + internal virtual long TotalIn => BaseStream._z.TotalBytesIn; + + internal virtual long TotalOut => BaseStream._z.TotalBytesOut; + + #endregion + + #region Stream methods + + /// + /// Indicates whether the stream can be read. + /// + /// + /// The return value depends on whether the captive stream supports reading. + /// + public override bool CanRead + { + get + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + return BaseStream._stream.CanRead; + } + } + + /// + /// Indicates whether the stream supports Seek operations. + /// + /// + /// Always returns false. + /// + public override bool CanSeek => false; + + /// + /// Indicates whether the stream can be written. + /// + /// + /// The return value depends on whether the captive stream supports writing. + /// + public override bool CanWrite + { + get + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + return BaseStream._stream.CanWrite; + } + } + + /// + /// Reading this property always throws a . + /// + public override long Length => throw new NotSupportedException(); + + /// + /// The position of the stream pointer. + /// + /// + /// + /// Setting this property always throws a . Reading will return the total bytes + /// written out, if used in writing, or the total bytes read in, if used in + /// reading. The count may refer to compressed bytes or uncompressed bytes, + /// depending on how you've used the stream. 
+ /// + public override long Position + { + get + { + if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Writer) + { + return BaseStream._z.TotalBytesOut + _headerByteCount; + } + if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Reader) + { + return BaseStream._z.TotalBytesIn + BaseStream._gzipHeaderByteCount; + } + return 0; + } + + set => throw new NotSupportedException(); + } + + /// + /// Dispose the stream. + /// + /// + /// This may or may not result in a Close() call on the captive stream. + /// + protected override void Dispose(bool disposing) + { + try + { + if (!_disposed) + { + if (disposing && (BaseStream != null)) + { + BaseStream.Dispose(); + Crc32 = BaseStream.Crc32; + } + _disposed = true; + } + } + finally + { + base.Dispose(disposing); + } + } + + /// + /// Flush the stream. + /// + public override void Flush() + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + BaseStream.Flush(); + } + + /// + /// Read and decompress data from the source stream. + /// + /// + /// + /// With a GZipStream, decompression is done through reading. + /// + /// + /// + /// + /// byte[] working = new byte[WORKING_BUFFER_SIZE]; + /// using (System.IO.Stream input = System.IO.File.OpenRead(_CompressedFile)) + /// { + /// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true)) + /// { + /// using (var output = System.IO.File.Create(_DecompressedFile)) + /// { + /// int n; + /// while ((n= decompressor.Read(working, 0, working.Length)) !=0) + /// { + /// output.Write(working, 0, n); + /// } + /// } + /// } + /// } + /// + /// + /// The buffer into which the decompressed data should be placed. + /// the offset within that data array to put the first byte read. + /// the number of bytes to read. 
+ /// the number of bytes actually read + public override int Read(byte[] buffer, int offset, int count) + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + int n = BaseStream.Read(buffer, offset, count); + + // Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n); + // Console.WriteLine( Util.FormatByteArray(buffer, offset, n) ); + + if (!_firstReadDone) + { + _firstReadDone = true; + FileName = BaseStream._GzipFileName; + Comment = BaseStream._GzipComment; + } + return n; + } + + /// + /// Calling this method always throws a . + /// + /// irrelevant; it will always throw! + /// irrelevant; it will always throw! + /// irrelevant! + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + /// + /// Calling this method always throws a . + /// + /// irrelevant; this method will always throw! + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + /// + /// Write data to the stream. + /// + /// + /// + /// + /// If you wish to use the GZipStream to compress data while writing, + /// you can create a GZipStream with CompressionMode.Compress, and a + /// writable output stream. Then call Write() on that GZipStream, + /// providing uncompressed data as input. The data sent to the output stream + /// will be the compressed form of the data written. + /// + /// + /// + /// A GZipStream can be used for Read() or Write(), but not + /// both. Writing implies compression. Reading implies decompression. + /// + /// + /// + /// The buffer holding data to write to the stream. + /// the offset within that data array to find the first byte to write. + /// the number of bytes to write. 
+ public override void Write(byte[] buffer, int offset, int count) + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + if (BaseStream._streamMode == ZlibBaseStream.StreamMode.Undefined) + { + //Console.WriteLine("GZipStream: First write"); + if (BaseStream._wantCompress) + { + // first write in compression, therefore, emit the GZIP header + _headerByteCount = EmitHeader(); + } + else + { + throw new InvalidOperationException(); + } + } + + BaseStream.Write(buffer, offset, count); + } + + #endregion Stream methods + + public String Comment + { + get => _comment; + set + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + _comment = value; + } + } + + public string FileName + { + get => _fileName; + set + { + if (_disposed) + { + throw new ObjectDisposedException("GZipStream"); + } + _fileName = value; + if (_fileName == null) + { + return; + } + if (_fileName.IndexOf("/") != -1) + { + _fileName = _fileName.Replace("/", "\\"); + } + if (_fileName.EndsWith("\\")) + { + throw new InvalidOperationException("Illegal filename"); + } + + var index = _fileName.IndexOf("\\"); + if (index != -1) + { + // trim any leading path + int length = _fileName.Length; + int num = length; + while (--num >= 0) + { + char c = _fileName[num]; + if (c == '\\') + { + _fileName = _fileName.Substring(num + 1, length - num - 1); + } + } + } + } + } + + public int Crc32 { get; private set; } + + private int EmitHeader() + { + byte[] commentBytes = (Comment == null) ? null + : _encoding.GetBytes(Comment); + byte[] filenameBytes = (FileName == null) ? null + : _encoding.GetBytes(FileName); + + int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1; + int fnLength = (FileName == null) ? 
0 : filenameBytes.Length + 1; + + int bufferLength = 10 + cbLength + fnLength; + var header = new byte[bufferLength]; + int i = 0; + + // ID + header[i++] = 0x1F; + header[i++] = 0x8B; + + // compression method + header[i++] = 8; + byte flag = 0; + if (Comment != null) + { + flag ^= 0x10; + } + if (FileName != null) + { + flag ^= 0x8; + } + + // flag + header[i++] = flag; + + // mtime + if (!LastModified.HasValue) + { + LastModified = DateTime.Now; + } + TimeSpan delta = LastModified.Value - UNIX_EPOCH; + var timet = (Int32)delta.TotalSeconds; + DataConverter.LittleEndian.PutBytes(header, i, timet); + i += 4; + + // xflg + header[i++] = 0; // this field is totally useless + + // OS + header[i++] = 0xFF; // 0xFF == unspecified + + // extra field length - only if FEXTRA is set, which it is not. + //header[i++]= 0; + //header[i++]= 0; + + // filename + if (fnLength != 0) + { + Array.Copy(filenameBytes, 0, header, i, fnLength - 1); + i += fnLength - 1; + header[i++] = 0; // terminate + } + + // comment + if (cbLength != 0) + { + Array.Copy(commentBytes, 0, header, i, cbLength - 1); + i += cbLength - 1; + header[i++] = 0; // terminate + } + + BaseStream._stream.Write(header, 0, header.Length); + + return header.Length; // bytes written + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/InfTree.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/InfTree.cs new file mode 100644 index 0000000000..a144ee4a8d --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/InfTree.cs @@ -0,0 +1,576 @@ +// Inftree.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. 
+// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-October-28 12:43:54> +// +// ------------------------------------------------------------------ +// +// This module defines classes used in decompression. This code is derived +// from the jzlib implementation of zlib. In keeping with the license for jzlib, +// the copyright to that code is below. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + +using System; + +namespace SharpCompress.Compressors.Deflate +{ + internal sealed class InfTree + { + private const int MANY = 1440; + + private const int Z_OK = 0; + private const int Z_STREAM_END = 1; + private const int Z_NEED_DICT = 2; + private const int Z_ERRNO = -1; + private const int Z_STREAM_ERROR = -2; + private const int Z_DATA_ERROR = -3; + private const int Z_MEM_ERROR = -4; + private const int Z_BUF_ERROR = -5; + private const int Z_VERSION_ERROR = -6; + + internal const int fixed_bl = 9; + internal const int fixed_bd = 5; + internal const int BMAX = 15; // maximum bit length of any code + + //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_tl'. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] fixed_tl = + { + 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, + 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, + 160, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, + 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, + 208, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 176, 0, 8, 8, 0, 8, + 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, + 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13, 0, + 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, + 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, + 8, 124, 0, 8, 60, 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0 + , 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, + 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50 + , 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2 + , 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8, 26 + , 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, + 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, + 74, 0, 9, 244, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, + 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8 + , 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 236, 80, + 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, + 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0 + , 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, + 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, + 194, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, + 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, + 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210, 81, 7, 17, 0, + 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, + 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0 + , 8, 117, 0, 8, 53, 0, 9, 202, 81, 
7, 13, 0, 8, 101, 0, 8, 37, + 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 234, 80, 7, 8, + 0, 8, 93, 0, 8, 29, 0, 9, 154, 84, 7, 83, 0, 8, 125, 0, 8, 61, + 0, 9, 218, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186, + 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83, + 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, + 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, + 131, 0, 8, 67, 0, 9, 230, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, + 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, + 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, + 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, + 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0 + , 9, 174, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0 + , 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, + 0, 9, 222, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 190, 0, 8, 15 + , 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 256, 0, 8, 80, 0, 8, + 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 193, 80, + 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8 + , 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, + 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0 + , 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, + 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116 + , 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, + 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8, + 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, + 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8 + , 140, 0, 8, 76, 0, 9, 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, + 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, + 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0 + , 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, + 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42 + , 0, 9, 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 
9, 245, 80, 7, + 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, + 54, 0, 9, 205, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 173, 0, 8 + , 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, + 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 221, 82, + 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 189, 0, 8, + 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8 + , 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 195, + 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, + 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 147, + 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, + 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, + 243, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8 + , 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, + 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 235, 80, 7, 8, 0, + 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, + 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0 + , 8, 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, + 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, + 11, 0, 8, 99, 0, 8, 35, 0, 9, 167, 0, 8, 3, 0, 8, 131, 0, 8, + 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, + 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8, 107, 0, 8 + , 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, + 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, + 8, 55, 0, 9, 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0 + , 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, + 8, 31, 0, 9, 159, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 223, + 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143 + , 0, 8, 79, 0, 9, 255 + }; + + //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_td'. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] fixed_td = + { + 80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, + 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33 + , + 92, 5, 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, + 80 + , 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, + 1537 + , 85, 5, 97, 93, 5, 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92 + , 5 + , 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577 + }; + + // Tables for deflate from PKZIP's appnote.txt. + //UPGRADE_NOTE: Final was removed from the declaration of 'cplens'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cplens = + { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, + 51 + , 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 + }; + + // see note #13 above about 258 + //UPGRADE_NOTE: Final was removed from the declaration of 'cplext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cplext = + { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4 + , 4 + , 4, 5, 5, 5, 5, 0, 112, 112 + }; + + //UPGRADE_NOTE: Final was removed from the declaration of 'cpdist'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cpdist = + { + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, + 385 + , 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, + 16385, + 24577 + }; + + //UPGRADE_NOTE: Final was removed from the declaration of 'cpdext'. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal static readonly int[] cpdext = + { + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9 + , + 10, 10, 11, 11, 12, 12, 13, 13 + }; + + // If BMAX needs to be larger than 16, then h and x[] should be uLong. + internal int[] c; // bit length count table + internal int[] hn; // hufts used in space + internal int[] r; // table entry for structure assignment + internal int[] u; // table stack + internal int[] v; // work area for huft_build + internal int[] x; // bit offsets, then code stack + + private int huft_build(int[] b, int bindex, int n, int s, int[] d, int[] e, int[] t, int[] m, int[] hp, int[] hn, + int[] v) + { + // Given a list of code lengths and a maximum table size, make a set of + // tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR + // if the given code set is incomplete (the tables are still built in this + // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of + // lengths), or Z_MEM_ERROR if not enough memory. 
+ + int a; // counter for codes of length k + int f; // i repeats in table every f entries + int g; // maximum code length + int h; // table level + int i; // counter, current code + int j; // counter + int k; // number of bits in current code + int l; // bits per table (returned in m) + int mask; // (1 << w) - 1, to avoid cc -O bug on HP + int p; // pointer into c[], b[], or v[] + int q; // points to current table + int w; // bits before this table == (l * h) + int xp; // pointer into x + int y; // number of dummy codes added + int z; // number of entries in current table + + // Generate counts for each bit length + + p = 0; + i = n; + do + { + c[b[bindex + p]]++; + p++; + i--; // assume all entries <= BMAX + } + while (i != 0); + + if (c[0] == n) + { + // null input--all zero length codes + t[0] = -1; + m[0] = 0; + return Z_OK; + } + + // Find minimum and maximum length, bound *m by those + l = m[0]; + for (j = 1; j <= BMAX; j++) + { + if (c[j] != 0) + { + break; + } + } + k = j; // minimum code length + if (l < j) + { + l = j; + } + for (i = BMAX; i != 0; i--) + { + if (c[i] != 0) + { + break; + } + } + g = i; // maximum code length + if (l > i) + { + l = i; + } + m[0] = l; + + // Adjust last length count to fill out codes, if needed + for (y = 1 << j; j < i; j++, y <<= 1) + { + if ((y -= c[j]) < 0) + { + return Z_DATA_ERROR; + } + } + if ((y -= c[i]) < 0) + { + return Z_DATA_ERROR; + } + c[i] += y; + + // Generate starting offsets into the value table for each length + x[1] = j = 0; + p = 1; + xp = 2; + while (--i != 0) + { + // note that i == g from above + x[xp] = (j += c[p]); + xp++; + p++; + } + + // Make a table of values in order of bit lengths + i = 0; + p = 0; + do + { + if ((j = b[bindex + p]) != 0) + { + v[x[j]++] = i; + } + p++; + } + while (++i < n); + n = x[g]; // set n to length of v + + // Generate the Huffman codes and for each, make the table entries + x[0] = i = 0; // first Huffman code is zero + p = 0; // grab values in bit order + h = -1; // 
no tables yet--level -1 + w = -l; // bits decoded == (l * h) + u[0] = 0; // just to keep compilers happy + q = 0; // ditto + z = 0; // ditto + + // go through the bit lengths (k already is bits in shortest code) + for (; k <= g; k++) + { + a = c[k]; + while (a-- != 0) + { + // here i is the Huffman code of length k bits for value *p + // make tables up to required level + while (k > w + l) + { + h++; + w += l; // previous table always l bits + + // compute minimum size table less than or equal to l bits + z = g - w; + z = (z > l) ? l : z; // table size upper limit + if ((f = 1 << (j = k - w)) > a + 1) + { + // try a k-w bit table + // too few codes for k-w bit table + f -= (a + 1); // deduct codes from patterns left + xp = k; + if (j < z) + { + while (++j < z) + { + // try smaller tables up to z bits + if ((f <<= 1) <= c[++xp]) + { + break; // enough codes to use up j bits + } + f -= c[xp]; // else deduct codes from patterns + } + } + } + z = 1 << j; // table entries for j-bit table + + // allocate new table + if (hn[0] + z > MANY) + { + // (note: doesn't matter for fixed) + return Z_DATA_ERROR; // overflow of MANY + } + u[h] = q = hn[0]; // DEBUG + hn[0] += z; + + // connect to last table, if there is one + if (h != 0) + { + x[h] = i; // save pattern for backing up + r[0] = (sbyte)j; // bits in this table + r[1] = (sbyte)l; // bits to dump before this table + j = SharedUtils.URShift(i, (w - l)); + r[2] = (q - u[h - 1] - j); // offset to this table + Array.Copy(r, 0, hp, (u[h - 1] + j) * 3, 3); // connect to last table + } + else + { + t[0] = q; // first table is returned result + } + } + + // set up table entry in r + r[1] = (sbyte)(k - w); + if (p >= n) + { + r[0] = 128 + 64; // out of values--invalid code + } + else if (v[p] < s) + { + r[0] = (sbyte)(v[p] < 256 ? 
0 : 32 + 64); // 256 is end-of-block + r[2] = v[p++]; // simple code is just the value + } + else + { + r[0] = (sbyte)(e[v[p] - s] + 16 + 64); // non-simple--look up in lists + r[2] = d[v[p++] - s]; + } + + // fill code-like entries with r + f = 1 << (k - w); + for (j = SharedUtils.URShift(i, w); j < z; j += f) + { + Array.Copy(r, 0, hp, (q + j) * 3, 3); + } + + // backwards increment the k-bit code i + for (j = 1 << (k - 1); (i & j) != 0; j = SharedUtils.URShift(j, 1)) + { + i ^= j; + } + i ^= j; + + // backup over finished tables + mask = (1 << w) - 1; // needed on HP, cc -O bug + while ((i & mask) != x[h]) + { + h--; // don't need to update q + w -= l; + mask = (1 << w) - 1; + } + } + } + + // Return Z_BUF_ERROR if we were given an incomplete table + return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK; + } + + internal int inflate_trees_bits(int[] c, int[] bb, int[] tb, int[] hp, ZlibCodec z) + { + int result; + initWorkArea(19); + hn[0] = 0; + result = huft_build(c, 0, 19, 19, null, null, tb, bb, hp, hn, v); + + if (result == Z_DATA_ERROR) + { + z.Message = "oversubscribed dynamic bit lengths tree"; + } + else if (result == Z_BUF_ERROR || bb[0] == 0) + { + z.Message = "incomplete dynamic bit lengths tree"; + result = Z_DATA_ERROR; + } + return result; + } + + internal int inflate_trees_dynamic(int nl, int nd, int[] c, int[] bl, int[] bd, int[] tl, int[] td, int[] hp, + ZlibCodec z) + { + int result; + + // build literal/length tree + initWorkArea(288); + hn[0] = 0; + result = huft_build(c, 0, nl, 257, cplens, cplext, tl, bl, hp, hn, v); + if (result != Z_OK || bl[0] == 0) + { + if (result == Z_DATA_ERROR) + { + z.Message = "oversubscribed literal/length tree"; + } + else if (result != Z_MEM_ERROR) + { + z.Message = "incomplete literal/length tree"; + result = Z_DATA_ERROR; + } + return result; + } + + // build distance tree + initWorkArea(288); + result = huft_build(c, nl, nd, 0, cpdist, cpdext, td, bd, hp, hn, v); + + if (result != Z_OK || (bd[0] == 0 && nl > 257)) + 
{ + if (result == Z_DATA_ERROR) + { + z.Message = "oversubscribed distance tree"; + } + else if (result == Z_BUF_ERROR) + { + z.Message = "incomplete distance tree"; + result = Z_DATA_ERROR; + } + else if (result != Z_MEM_ERROR) + { + z.Message = "empty distance tree with lengths"; + result = Z_DATA_ERROR; + } + return result; + } + + return Z_OK; + } + + internal static int inflate_trees_fixed(int[] bl, int[] bd, int[][] tl, int[][] td, ZlibCodec z) + { + bl[0] = fixed_bl; + bd[0] = fixed_bd; + tl[0] = fixed_tl; + td[0] = fixed_td; + return Z_OK; + } + + private void initWorkArea(int vsize) + { + if (hn == null) + { + hn = new int[1]; + v = new int[vsize]; + c = new int[BMAX + 1]; + r = new int[3]; + u = new int[BMAX]; + x = new int[BMAX + 1]; + } + else + { + if (v.Length < vsize) + { + v = new int[vsize]; + } + Array.Clear(v, 0, vsize); + Array.Clear(c, 0, BMAX + 1); + r[0] = 0; + r[1] = 0; + r[2] = 0; + + // for(int i=0; i +// +// ------------------------------------------------------------------ +// +// This module defines classes for decompression. This code is derived +// from the jzlib implementation of zlib, but significantly modified. +// The object model is not the same, and many of the behaviors are +// different. Nonetheless, in keeping with the license for jzlib, I am +// reproducing the copyright to that code here. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + +using System; + +namespace SharpCompress.Compressors.Deflate +{ + internal sealed class InflateBlocks + { + private const int MANY = 1440; + + // Table for deflate from PKZIP's appnote.txt. 
+ internal static readonly int[] border = {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + + internal ZlibCodec _codec; // pointer back to this zlib stream + internal int[] bb = new int[1]; // bit length tree depth + + // mode independent information + internal int bitb; // bit buffer + internal int bitk; // bits in bit buffer + internal int[] blens; // bit lengths of codes + internal uint check; // check on output + internal Object checkfn; // check function + internal InflateCodes codes = new InflateCodes(); // if CODES, current state + internal int end; // one byte after sliding window + internal int[] hufts; // single malloc for tree space + internal int index; // index into blens (or border) + internal InfTree inftree = new InfTree(); + internal int last; // true if this block is the last block + internal int left; // if STORED, bytes left to copy + private InflateBlockMode mode; // current inflate_block mode + internal int readAt; // window read pointer + internal int table; // table lengths (14 bits) + internal int[] tb = new int[1]; // bit length decoding tree + internal byte[] window; // sliding window + internal int writeAt; // window write pointer + + internal InflateBlocks(ZlibCodec codec, Object checkfn, int w) + { + _codec = codec; + hufts = new int[MANY * 3]; + window = new byte[w]; + end = w; + this.checkfn = checkfn; + mode = InflateBlockMode.TYPE; + Reset(); + } + + internal uint Reset() + { + uint oldCheck = check; + mode = InflateBlockMode.TYPE; + bitk = 0; + bitb = 0; + readAt = writeAt = 0; + + if (checkfn != null) + { + _codec._Adler32 = check = Adler.Adler32(0, null, 0, 0); + } + return oldCheck; + } + + internal int Process(int r) + { + int t; // temporary storage + int b; // bit buffer + int k; // bits in bit buffer + int p; // input data pointer + int n; // bytes available there + int q; // output window write pointer + int m; // bytes to end of window or read pointer + + // copy input/output information to locals 
(UPDATE macro restores) + + p = _codec.NextIn; + n = _codec.AvailableBytesIn; + b = bitb; + k = bitk; + + q = writeAt; + m = (q < readAt ? readAt - q - 1 : end - q); + + // process input based on current state + while (true) + { + switch (mode) + { + case InflateBlockMode.TYPE: + + while (k < (3)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + t = (b & 7); + last = t & 1; + + switch ((uint)t >> 1) + { + case 0: // stored + b >>= 3; + k -= (3); + t = k & 7; // go to byte boundary + b >>= t; + k -= t; + mode = InflateBlockMode.LENS; // get length of stored block + break; + + case 1: // fixed + var bl = new int[1]; + var bd = new int[1]; + var tl = new int[1][]; + var td = new int[1][]; + InfTree.inflate_trees_fixed(bl, bd, tl, td, _codec); + codes.Init(bl[0], bd[0], tl[0], 0, td[0], 0); + b >>= 3; + k -= 3; + mode = InflateBlockMode.CODES; + break; + + case 2: // dynamic + b >>= 3; + k -= 3; + mode = InflateBlockMode.TABLE; + break; + + case 3: // illegal + b >>= 3; + k -= 3; + mode = InflateBlockMode.BAD; + _codec.Message = "invalid block type"; + r = ZlibConstants.Z_DATA_ERROR; + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + break; + + case InflateBlockMode.LENS: + + while (k < (32)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + ; + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + if ((((~b) >> 16) & 0xffff) != (b & 0xffff)) + { + mode = InflateBlockMode.BAD; + _codec.Message = "invalid stored block lengths"; + r = 
ZlibConstants.Z_DATA_ERROR; + + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + left = (b & 0xffff); + b = k = 0; // dump bits + mode = left != 0 + ? InflateBlockMode.STORED + : (last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE); + break; + + case InflateBlockMode.STORED: + if (n == 0) + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + if (m == 0) + { + if (q == end && readAt != 0) + { + q = 0; + m = (q < readAt ? readAt - q - 1 : end - q); + } + if (m == 0) + { + writeAt = q; + r = Flush(r); + q = writeAt; + m = (q < readAt ? readAt - q - 1 : end - q); + if (q == end && readAt != 0) + { + q = 0; + m = (q < readAt ? readAt - q - 1 : end - q); + } + if (m == 0) + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + } + } + r = ZlibConstants.Z_OK; + + t = left; + if (t > n) + { + t = n; + } + if (t > m) + { + t = m; + } + Array.Copy(_codec.InputBuffer, p, window, q, t); + p += t; + n -= t; + q += t; + m -= t; + if ((left -= t) != 0) + { + break; + } + mode = last != 0 ? 
InflateBlockMode.DRY : InflateBlockMode.TYPE; + break; + + case InflateBlockMode.TABLE: + + while (k < (14)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + table = t = (b & 0x3fff); + if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) + { + mode = InflateBlockMode.BAD; + _codec.Message = "too many length or distance symbols"; + r = ZlibConstants.Z_DATA_ERROR; + + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f); + if (blens == null || blens.Length < t) + { + blens = new int[t]; + } + else + { + Array.Clear(blens, 0, t); + + // for (int i = 0; i < t; i++) + // { + // blens[i] = 0; + // } + } + + b >>= 14; + k -= 14; + + index = 0; + mode = InflateBlockMode.BTREE; + goto case InflateBlockMode.BTREE; + + case InflateBlockMode.BTREE: + while (index < 4 + (table >> 10)) + { + while (k < (3)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + blens[border[index++]] = b & 7; + + b >>= 3; + k -= 3; + } + + while (index < 19) + { + blens[border[index++]] = 0; + } + + bb[0] = 7; + t = inftree.inflate_trees_bits(blens, bb, tb, hufts, _codec); + if (t != ZlibConstants.Z_OK) + { + r = t; + if (r == ZlibConstants.Z_DATA_ERROR) + { + blens = null; + mode = InflateBlockMode.BAD; + } + + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + index = 0; + 
mode = InflateBlockMode.DTREE; + goto case InflateBlockMode.DTREE; + + case InflateBlockMode.DTREE: + while (true) + { + t = table; + if (!(index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))) + { + break; + } + + int i, j, c; + + t = bb[0]; + + while (k < t) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + t = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 1]; + c = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 2]; + + if (c < 16) + { + b >>= t; + k -= t; + blens[index++] = c; + } + else + { + // c == 16..18 + i = c == 18 ? 7 : c - 14; + j = c == 18 ? 11 : 3; + + while (k < (t + i)) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + n--; + b |= (_codec.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + b >>= t; + k -= t; + + j += (b & InternalInflateConstants.InflateMask[i]); + + b >>= i; + k -= i; + + i = index; + t = table; + if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1)) + { + blens = null; + mode = InflateBlockMode.BAD; + _codec.Message = "invalid bit length repeat"; + r = ZlibConstants.Z_DATA_ERROR; + + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + + c = (c == 16) ? 
blens[i - 1] : 0; + do + { + blens[i++] = c; + } + while (--j != 0); + index = i; + } + } + + tb[0] = -1; + { + var bl = new[] {9}; // must be <= 9 for lookahead assumptions + var bd = new[] {6}; // must be <= 9 for lookahead assumptions + var tl = new int[1]; + var td = new int[1]; + + t = table; + t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl, bd, tl, + td, hufts, _codec); + + if (t != ZlibConstants.Z_OK) + { + if (t == ZlibConstants.Z_DATA_ERROR) + { + blens = null; + mode = InflateBlockMode.BAD; + } + r = t; + + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + codes.Init(bl[0], bd[0], hufts, tl[0], hufts, td[0]); + } + mode = InflateBlockMode.CODES; + goto case InflateBlockMode.CODES; + + case InflateBlockMode.CODES: + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + + r = codes.Process(this, r); + if (r != ZlibConstants.Z_STREAM_END) + { + return Flush(r); + } + + r = ZlibConstants.Z_OK; + p = _codec.NextIn; + n = _codec.AvailableBytesIn; + b = bitb; + k = bitk; + q = writeAt; + m = (q < readAt ? readAt - q - 1 : end - q); + + if (last == 0) + { + mode = InflateBlockMode.TYPE; + break; + } + mode = InflateBlockMode.DRY; + goto case InflateBlockMode.DRY; + + case InflateBlockMode.DRY: + writeAt = q; + r = Flush(r); + q = writeAt; + m = (q < readAt ? 
readAt - q - 1 : end - q); + if (readAt != writeAt) + { + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + mode = InflateBlockMode.DONE; + goto case InflateBlockMode.DONE; + + case InflateBlockMode.DONE: + r = ZlibConstants.Z_STREAM_END; + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + + case InflateBlockMode.BAD: + r = ZlibConstants.Z_DATA_ERROR; + + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + + default: + r = ZlibConstants.Z_STREAM_ERROR; + + bitb = b; + bitk = k; + _codec.AvailableBytesIn = n; + _codec.TotalBytesIn += p - _codec.NextIn; + _codec.NextIn = p; + writeAt = q; + return Flush(r); + } + } + } + + internal void Free() + { + Reset(); + window = null; + hufts = null; + } + + internal void SetDictionary(byte[] d, int start, int n) + { + Array.Copy(d, start, window, 0, n); + readAt = writeAt = n; + } + + // Returns true if inflate is currently at the end of a block generated + // by Z_SYNC_FLUSH or Z_FULL_FLUSH. + internal int SyncPoint() + { + return mode == InflateBlockMode.LENS ? 1 : 0; + } + + // copy as much as possible from the sliding window to the output area + internal int Flush(int r) + { + int nBytes; + + for (int pass = 0; pass < 2; pass++) + { + if (pass == 0) + { + // compute number of bytes to copy as far as end of window + nBytes = ((readAt <= writeAt ? 
writeAt : end) - readAt); + } + else + { + // compute bytes to copy + nBytes = writeAt - readAt; + } + + // workitem 8870 + if (nBytes == 0) + { + if (r == ZlibConstants.Z_BUF_ERROR) + { + r = ZlibConstants.Z_OK; + } + return r; + } + + if (nBytes > _codec.AvailableBytesOut) + { + nBytes = _codec.AvailableBytesOut; + } + + if (nBytes != 0 && r == ZlibConstants.Z_BUF_ERROR) + { + r = ZlibConstants.Z_OK; + } + + // update counters + _codec.AvailableBytesOut -= nBytes; + _codec.TotalBytesOut += nBytes; + + // update check information + if (checkfn != null) + { + _codec._Adler32 = check = Adler.Adler32(check, window, readAt, nBytes); + } + + // copy as far as end of window + Array.Copy(window, readAt, _codec.OutputBuffer, _codec.NextOut, nBytes); + _codec.NextOut += nBytes; + readAt += nBytes; + + // see if more to copy at beginning of window + if (readAt == end && pass == 0) + { + // wrap pointers + readAt = 0; + if (writeAt == end) + { + writeAt = 0; + } + } + else + { + pass++; + } + } + + // done + return r; + } + + #region Nested type: InflateBlockMode + + private enum InflateBlockMode + { + TYPE = 0, // get type bits (3, including end bit) + LENS = 1, // get lengths for stored + STORED = 2, // processing stored block + TABLE = 3, // get table lengths + BTREE = 4, // get bit lengths tree for a dynamic block + DTREE = 5, // get length, distance trees for a dynamic block + CODES = 6, // processing fixed or dynamic block + DRY = 7, // output remaining window bytes + DONE = 8, // finished last block, done + BAD = 9 // ot a data error--stuck here + } + + #endregion + } + + internal static class InternalInflateConstants + { + // And'ing with mask[n] masks the lower n bits + internal static readonly int[] InflateMask = + { + 0x00000000, 0x00000001, 0x00000003, 0x00000007, + 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f, + 0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff, + 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff + }; + } + + internal sealed class 
InflateCodes + { + // waiting for "i:"=input, + // "o:"=output, + // "x:"=nothing + private const int START = 0; // x: set up for LEN + private const int LEN = 1; // i: get length/literal/eob next + private const int LENEXT = 2; // i: getting length extra (have base) + private const int DIST = 3; // i: get distance next + private const int DISTEXT = 4; // i: getting distance extra + private const int COPY = 5; // o: copying bytes in window, waiting for space + private const int LIT = 6; // o: got literal, waiting for output space + private const int WASH = 7; // o: got eob, possibly still output waiting + private const int END = 8; // x: got eob and all data flushed + private const int BADCODE = 9; // x: got error + + // if EXT or COPY, where and how much + internal int bitsToGet; // bits to get for extra + internal byte dbits; // dtree bits decoder per branch + internal int dist; // distance back to copy from + internal int[] dtree; // distance tree + internal int dtree_index; // distance tree + + internal byte lbits; // ltree bits decoded per branch + internal int len; + internal int lit; + internal int[] ltree; // literal/length/eob tree + internal int ltree_index; // literal/length/eob tree + internal int mode; // current inflate_codes mode + internal int need; // bits needed + internal int[] tree; // pointer into tree + internal int tree_index; + + internal void Init(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index) + { + mode = START; + lbits = (byte)bl; + dbits = (byte)bd; + ltree = tl; + ltree_index = tl_index; + dtree = td; + dtree_index = td_index; + tree = null; + } + + internal int Process(InflateBlocks blocks, int r) + { + int j; // temporary storage + int tindex; // temporary pointer + int e; // extra bits or operation + int b = 0; // bit buffer + int k = 0; // bits in bit buffer + int p = 0; // input data pointer + int n; // bytes available there + int q; // output window write pointer + int m; // bytes to end of window or read pointer 
+ int f; // pointer to copy strings from + + ZlibCodec z = blocks._codec; + + // copy input/output information to locals (UPDATE macro restores) + p = z.NextIn; + n = z.AvailableBytesIn; + b = blocks.bitb; + k = blocks.bitk; + q = blocks.writeAt; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + // process input and output based on current state + while (true) + { + switch (mode) + { + // waiting for "i:"=input, "o:"=output, "x:"=nothing + case START: // x: set up for LEN + if (m >= 258 && n >= 10) + { + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + r = InflateFast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, blocks, z); + + p = z.NextIn; + n = z.AvailableBytesIn; + b = blocks.bitb; + k = blocks.bitk; + q = blocks.writeAt; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (r != ZlibConstants.Z_OK) + { + mode = (r == ZlibConstants.Z_STREAM_END) ? WASH : BADCODE; + break; + } + } + need = lbits; + tree = ltree; + tree_index = ltree_index; + + mode = LEN; + goto case LEN; + + case LEN: // i: get length/literal/eob next + j = need; + + while (k < j) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + // Handling missing trailing bit(s) + var tmp_tindex = (tree_index + (b & InternalInflateConstants.InflateMask[k])) * 3; + if (k >= tree[tmp_tindex + 1]) + { + break; + } + + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3; + + b >>= (tree[tindex + 1]); + k -= (tree[tindex + 1]); + + e = tree[tindex]; + + if (e == 0) + { + // literal + lit = tree[tindex + 2]; + mode = LIT; + break; + } + if ((e & 16) != 0) + { + // length + bitsToGet = e & 15; + len = tree[tindex + 2]; 
+ mode = LENEXT; + break; + } + if ((e & 64) == 0) + { + // next table + need = e; + tree_index = tindex / 3 + tree[tindex + 2]; + break; + } + if ((e & 32) != 0) + { + // end of block + mode = WASH; + break; + } + mode = BADCODE; // invalid code + z.Message = "invalid literal/length code"; + r = ZlibConstants.Z_DATA_ERROR; + + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + case LENEXT: // i: getting length extra (have base) + j = bitsToGet; + + while (k < j) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + len += (b & InternalInflateConstants.InflateMask[j]); + + b >>= j; + k -= j; + + need = dbits; + tree = dtree; + tree_index = dtree_index; + mode = DIST; + goto case DIST; + + case DIST: // i: get distance next + j = need; + + while (k < j) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + // Handling missing trailing bit(s) + var tmp_tindex = (tree_index + (b & InternalInflateConstants.InflateMask[k])) * 3; + if (k >= tree[tmp_tindex + 1]) + { + break; + } + + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3; + + b >>= tree[tindex + 1]; + k -= tree[tindex + 1]; + + e = (tree[tindex]); + if ((e & 0x10) != 0) + { + // distance + bitsToGet = e & 15; + dist = tree[tindex + 2]; + mode = DISTEXT; + break; + } + if ((e & 64) == 0) + { + // next table + need = e; + tree_index = tindex / 3 + tree[tindex + 2]; + break; + } + mode = BADCODE; // 
invalid code + z.Message = "invalid distance code"; + r = ZlibConstants.Z_DATA_ERROR; + + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + case DISTEXT: // i: getting distance extra + j = bitsToGet; + + while (k < j) + { + if (n != 0) + { + r = ZlibConstants.Z_OK; + } + else + { + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + n--; + b |= (z.InputBuffer[p++] & 0xff) << k; + k += 8; + } + + dist += (b & InternalInflateConstants.InflateMask[j]); + + b >>= j; + k -= j; + + mode = COPY; + goto case COPY; + + case COPY: // o: copying bytes in window, waiting for space + f = q - dist; + while (f < 0) + { + // modulo window size-"while" instead + f += blocks.end; // of "if" handles invalid distances + } + while (len != 0) + { + if (m == 0) + { + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + } + if (m == 0) + { + blocks.writeAt = q; + r = blocks.Flush(r); + q = blocks.writeAt; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + } + + if (m == 0) + { + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + } + } + + blocks.window[q++] = blocks.window[f++]; + m--; + + if (f == blocks.end) + { + f = 0; + } + len--; + } + mode = START; + break; + + case LIT: // o: got literal, waiting for output space + if (m == 0) + { + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; + m = q < blocks.readAt ? 
blocks.readAt - q - 1 : blocks.end - q; + } + if (m == 0) + { + blocks.writeAt = q; + r = blocks.Flush(r); + q = blocks.writeAt; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (q == blocks.end && blocks.readAt != 0) + { + q = 0; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + } + if (m == 0) + { + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + } + } + r = ZlibConstants.Z_OK; + + blocks.window[q++] = (byte)lit; + m--; + + mode = START; + break; + + case WASH: // o: got eob, possibly more output + if (k > 7) + { + // return unused byte, if any + k -= 8; + n++; + p--; // can always return one + } + + blocks.writeAt = q; + r = blocks.Flush(r); + q = blocks.writeAt; + m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; + + if (blocks.readAt != blocks.writeAt) + { + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + mode = END; + goto case END; + + case END: + r = ZlibConstants.Z_STREAM_END; + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + case BADCODE: // x: got error + + r = ZlibConstants.Z_DATA_ERROR; + + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + + default: + r = ZlibConstants.Z_STREAM_ERROR; + + blocks.bitb = b; + blocks.bitk = k; + z.AvailableBytesIn = n; + z.TotalBytesIn += p - z.NextIn; + z.NextIn = p; + blocks.writeAt = q; + return blocks.Flush(r); + } + } + } + + // Called with number of bytes left to write in window at least 258 + // (the maximum string length) and number of input bytes available + // at least ten. 
The ten bytes are six bytes for the longest length/
// distance pair plus four bytes for overloading the bit buffer.

/// <summary>
/// Fast-path decoder for literal/length and distance codes. Only entered when at least 258
/// bytes of window space (the maximum match length) and at least ten input bytes are
/// available, so the per-symbol bounds checks of the slow path can be skipped.
/// Huffman tables are flat int[] arrays of 3-int entries (op-flags, bits-consumed, value) —
/// see the (index * 3) addressing below. Returns Z_OK when the fast-loop preconditions no
/// longer hold, Z_STREAM_END on end-of-block, or Z_DATA_ERROR on an invalid code.
/// </summary>
internal int InflateFast(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index, InflateBlocks s,
                         ZlibCodec z)
{
    int t; // temporary pointer
    int[] tp; // temporary pointer
    int tp_index; // temporary pointer
    int e; // extra bits or operation
    int b; // bit buffer
    int k; // bits in bit buffer
    int p; // input data pointer
    int n; // bytes available there
    int q; // output window write pointer
    int m; // bytes to end of window or read pointer
    int ml; // mask for literal/length tree
    int md; // mask for distance tree
    int c; // bytes to copy
    int d; // distance back to copy from
    int r; // copy source pointer

    int tp_index_t_3; // (tp_index+t)*3

    // load input, output, bit values into locals for speed
    p = z.NextIn;
    n = z.AvailableBytesIn;
    b = s.bitb;
    k = s.bitk;
    q = s.writeAt;
    m = q < s.readAt ? s.readAt - q - 1 : s.end - q;

    // initialize masks for the root-table lookups
    ml = InternalInflateConstants.InflateMask[bl];
    md = InternalInflateConstants.InflateMask[bd];

    // do until not enough input or output space for fast loop
    do
    {
        // assume called with m >= 258 && n >= 10
        // get literal/length code
        while (k < (20))
        {
            // max bits for literal/length code
            n--;
            b |= (z.InputBuffer[p++] & 0xff) << k;
            k += 8;
        }

        t = b & ml;
        tp = tl;
        tp_index = tl_index;
        tp_index_t_3 = (tp_index + t) * 3;
        if ((e = tp[tp_index_t_3]) == 0)
        {
            // op == 0: plain literal byte, emit it directly
            b >>= (tp[tp_index_t_3 + 1]);
            k -= (tp[tp_index_t_3 + 1]);

            s.window[q++] = (byte)tp[tp_index_t_3 + 2];
            m--;
            continue;
        }
        do
        {
            b >>= (tp[tp_index_t_3 + 1]);
            k -= (tp[tp_index_t_3 + 1]);

            if ((e & 16) != 0)
            {
                // length base + extra bits
                e &= 15;
                c = tp[tp_index_t_3 + 2] + (b & InternalInflateConstants.InflateMask[e]);

                b >>= e;
                k -= e;

                // decode distance base of block to copy
                while (k < 15)
                {
                    // max bits for distance code
                    n--;
                    b |= (z.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                t = b & md;
                tp = td;
                tp_index = td_index;
                tp_index_t_3 = (tp_index + t) * 3;
                e = tp[tp_index_t_3];

                do
                {
                    b >>= (tp[tp_index_t_3 + 1]);
                    k -= (tp[tp_index_t_3 + 1]);

                    if ((e & 16) != 0)
                    {
                        // get extra bits to add to distance base
                        e &= 15;
                        while (k < e)
                        {
                            // get extra bits (up to 13)
                            n--;
                            b |= (z.InputBuffer[p++] & 0xff) << k;
                            k += 8;
                        }

                        d = tp[tp_index_t_3 + 2] + (b & InternalInflateConstants.InflateMask[e]);

                        b >>= e;
                        k -= e;

                        // do the copy
                        m -= c;
                        if (q >= d)
                        {
                            // offset before dest
                            // just copy
                            r = q - d;
                            if (q - r > 0 && 2 > (q - r))
                            {
                                s.window[q++] = s.window[r++]; // minimum count is three,
                                s.window[q++] = s.window[r++]; // so unroll loop a little
                                c -= 2;
                            }
                            else
                            {
                                Array.Copy(s.window, r, s.window, q, 2);
                                q += 2;
                                r += 2;
                                c -= 2;
                            }
                        }
                        else
                        {
                            // else offset after destination
                            r = q - d;
                            do
                            {
                                r += s.end; // force pointer in window
                            }
                            while (r < 0); // covers invalid distances
                            e = s.end - r;
                            if (c > e)
                            {
                                // if source crosses,
                                c -= e; // wrapped copy
                                if (q - r > 0 && e > (q - r))
                                {
                                    // overlapping regions: must copy byte-by-byte
                                    do
                                    {
                                        s.window[q++] = s.window[r++];
                                    }
                                    while (--e != 0);
                                }
                                else
                                {
                                    Array.Copy(s.window, r, s.window, q, e);
                                    q += e;
                                    r += e;
                                    e = 0;
                                }
                                r = 0; // copy rest from start of window
                            }
                        }

                        // copy all or what's left
                        if (q - r > 0 && c > (q - r))
                        {
                            do
                            {
                                s.window[q++] = s.window[r++];
                            }
                            while (--c != 0);
                        }
                        else
                        {
                            Array.Copy(s.window, r, s.window, q, c);
                            q += c;
                            r += c;
                            c = 0;
                        }
                        break;
                    }
                    if ((e & 64) == 0)
                    {
                        // second-level distance table lookup
                        t += tp[tp_index_t_3 + 2];
                        t += (b & InternalInflateConstants.InflateMask[e]);
                        tp_index_t_3 = (tp_index + t) * 3;
                        e = tp[tp_index_t_3];
                    }
                    else
                    {
                        z.Message = "invalid distance code";

                        // give back any whole bytes still sitting in the bit buffer
                        c = z.AvailableBytesIn - n;
                        c = (k >> 3) < c ?
k >> 3 : c;
                        n += c;
                        p -= c;
                        k -= (c << 3);

                        s.bitb = b;
                        s.bitk = k;
                        z.AvailableBytesIn = n;
                        z.TotalBytesIn += p - z.NextIn;
                        z.NextIn = p;
                        s.writeAt = q;

                        return ZlibConstants.Z_OK;
    }
}

/// <summary>
/// Per-stream inflate driver: parses the zlib (RFC 1950) wrapper — method/flags header with
/// its mod-31 check, the optional 4-byte preset-dictionary Adler-32 id, and the trailing
/// Adler-32 data check — and delegates compressed-block decoding to <see cref="InflateBlocks"/>.
/// When <see cref="HandleRfc1950HeaderBytes"/> is false, the wrapper is skipped entirely and
/// raw DEFLATE blocks are decoded.
/// </summary>
internal sealed class InflateManager
{
    // preset dictionary flag in zlib header
    private const int PRESET_DICT = 0x20;

    private const int Z_DEFLATED = 8;

    // byte pattern of an empty stored block (Z_SYNC_FLUSH marker) searched for by Sync()
    private static readonly byte[] mark = {0, 0, 0xff, 0xff};

    internal ZlibCodec _codec; // pointer back to this zlib stream
    internal InflateBlocks blocks; // current inflate_blocks state

    // mode dependent information

    // if CHECK, check values to compare
    internal uint computedCheck; // computed check value
    internal uint expectedCheck; // stream check value

    // if BAD, inflateSync's marker bytes count
    internal int marker;
    internal int method; // if FLAGS, method byte
    private InflateManagerMode mode; // current inflate mode

    // mode independent information
    //internal int nowrap; // flag for no wrapper

    internal int wbits; // log2(window size) (8..15, defaults to 15)

    public InflateManager()
    {
    }

    public InflateManager(bool expectRfc1950HeaderBytes)
    {
        HandleRfc1950HeaderBytes = expectRfc1950HeaderBytes;
    }

    // true: expect/verify the zlib header and Adler-32 trailer; false: raw deflate
    internal bool HandleRfc1950HeaderBytes { get; set; } = true;

    /// <summary>
    /// Resets counters and the mode machine so the same codec can decode another stream.
    /// Always returns Z_OK.
    /// </summary>
    internal int Reset()
    {
        _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
        _codec.Message = null;
        // skip straight to BLOCKS when no RFC 1950 wrapper is expected
        mode = HandleRfc1950HeaderBytes ? InflateManagerMode.METHOD : InflateManagerMode.BLOCKS;
        blocks.Reset();
        return ZlibConstants.Z_OK;
    }

    /// <summary>Releases the block decoder. Always returns Z_OK.</summary>
    internal int End()
    {
        if (blocks != null)
        {
            blocks.Free();
        }
        blocks = null;
        return ZlibConstants.Z_OK;
    }

    /// <summary>
    /// Binds this manager to a codec and allocates the sliding window.
    /// </summary>
    /// <param name="codec">Owning codec whose buffers/counters are driven by this manager.</param>
    /// <param name="w">log2 of the window size; must be in [8, 15].</param>
    /// <exception cref="ZlibException">Thrown when <paramref name="w"/> is out of range.</exception>
    internal int Initialize(ZlibCodec codec, int w)
    {
        _codec = codec;
        _codec.Message = null;
        blocks = null;

        // handle undocumented nowrap option (no zlib header or check)
        //nowrap = 0;
        //if (w < 0)
        //{
        //    w = - w;
        //    nowrap = 1;
        //}

        // set window size
        if (w < 8 || w > 15)
        {
            End();
            throw new ZlibException("Bad window size.");

            //return ZlibConstants.Z_STREAM_ERROR;
        }
        wbits = w;

        // pass 'this' as checker only when the Adler-32 trailer must be verified
        blocks = new InflateBlocks(codec,
                                   HandleRfc1950HeaderBytes ? this : null,
                                   1 << w);

        // reset state
        Reset();
        return ZlibConstants.Z_OK;
    }

    /// <summary>
    /// Pumps the state machine over the codec's current input buffer. Returns Z_OK while more
    /// input/output is expected, Z_BUF_ERROR when starved for input, Z_NEED_DICT when a preset
    /// dictionary id was read, or Z_STREAM_END once the stream (and trailer, if any) is done.
    /// </summary>
    internal int Inflate(FlushType flush)
    {
        int b;

        if (_codec.InputBuffer == null)
        {
            throw new ZlibException("InputBuffer is null. ");
        }

        // int f = (flush == FlushType.Finish)
        // ? ZlibConstants.Z_BUF_ERROR
        // : ZlibConstants.Z_OK;

        // workitem 8870
        int f = ZlibConstants.Z_OK;
        int r = ZlibConstants.Z_BUF_ERROR;

        while (true)
        {
            switch (mode)
            {
                case InflateManagerMode.METHOD:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    // low nibble of CMF must be 8 (deflate)
                    if (((method = _codec.InputBuffer[_codec.NextIn++]) & 0xf) != Z_DEFLATED)
                    {
                        mode = InflateManagerMode.BAD;
                        _codec.Message = String.Format("unknown compression method (0x{0:X2})", method);
                        marker = 5; // can't try inflateSync
                        break;
                    }
                    // high nibble is CINFO = log2(window) - 8; must fit our window
                    if ((method >> 4) + 8 > wbits)
                    {
                        mode = InflateManagerMode.BAD;
                        _codec.Message = String.Format("invalid window size ({0})", (method >> 4) + 8);
                        marker = 5; // can't try inflateSync
                        break;
                    }
                    mode = InflateManagerMode.FLAG;
                    break;

                case InflateManagerMode.FLAG:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    b = (_codec.InputBuffer[_codec.NextIn++]) & 0xff;

                    // (CMF*256 + FLG) must be a multiple of 31 (FCHECK)
                    if ((((method << 8) + b) % 31) != 0)
                    {
                        mode = InflateManagerMode.BAD;
                        _codec.Message = "incorrect header check";
                        marker = 5; // can't try inflateSync
                        break;
                    }

                    mode = ((b & PRESET_DICT) == 0)
                        ? InflateManagerMode.BLOCKS
                        : InflateManagerMode.DICT4;
                    break;

                // DICT4..DICT1 accumulate the big-endian 4-byte dictionary Adler-32 id
                case InflateManagerMode.DICT4:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
                    mode = InflateManagerMode.DICT3;
                    break;

                case InflateManagerMode.DICT3:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
                    mode = InflateManagerMode.DICT2;
                    break;

                case InflateManagerMode.DICT2:

                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
                    mode = InflateManagerMode.DICT1;
                    break;

                case InflateManagerMode.DICT1:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
                    _codec._Adler32 = expectedCheck;
                    mode = InflateManagerMode.DICT0;
                    // caller must now supply the dictionary via SetDictionary()
                    return ZlibConstants.Z_NEED_DICT;

                case InflateManagerMode.DICT0:
                    // Inflate() called again without SetDictionary() — hard error
                    mode = InflateManagerMode.BAD;
                    _codec.Message = "need dictionary";
                    marker = 0; // can try inflateSync
                    return ZlibConstants.Z_STREAM_ERROR;

                case InflateManagerMode.BLOCKS:
                    r = blocks.Process(r);
                    if (r == ZlibConstants.Z_DATA_ERROR)
                    {
                        mode = InflateManagerMode.BAD;
                        marker = 0; // can try inflateSync
                        break;
                    }

                    if (r == ZlibConstants.Z_OK)
                    {
                        r = f;
                    }

                    if (r != ZlibConstants.Z_STREAM_END)
                    {
                        return r;
                    }

                    r = f;
                    computedCheck = blocks.Reset();
                    if (!HandleRfc1950HeaderBytes)
                    {
                        // raw deflate: no trailer to verify
                        mode = InflateManagerMode.DONE;
                        return ZlibConstants.Z_STREAM_END;
                    }
                    mode = InflateManagerMode.CHECK4;
                    break;

                // CHECK4..CHECK1 accumulate the big-endian 4-byte Adler-32 trailer
                case InflateManagerMode.CHECK4:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
                    mode = InflateManagerMode.CHECK3;
                    break;

                case InflateManagerMode.CHECK3:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
                    mode = InflateManagerMode.CHECK2;
                    break;

                case InflateManagerMode.CHECK2:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
                    mode = InflateManagerMode.CHECK1;
                    break;

                case InflateManagerMode.CHECK1:
                    if (_codec.AvailableBytesIn == 0)
                    {
                        return r;
                    }
                    r = f;
                    _codec.AvailableBytesIn--;
                    _codec.TotalBytesIn++;
                    expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
                    if (computedCheck != expectedCheck)
                    {
                        mode = InflateManagerMode.BAD;
                        _codec.Message = "incorrect data check";
                        marker = 5; // can't try inflateSync
                        break;
                    }
                    mode = InflateManagerMode.DONE;
                    return ZlibConstants.Z_STREAM_END;

                case InflateManagerMode.DONE:
                    return ZlibConstants.Z_STREAM_END;

                case InflateManagerMode.BAD:
                    throw new ZlibException(String.Format("Bad state ({0})", _codec.Message));

                default:
                    throw new ZlibException("Stream error.");
            }
        }
    }

    /// <summary>
    /// Supplies the preset dictionary announced by the header. Verifies its Adler-32 against
    /// the id read in DICT4..DICT1, primes the window with (at most) the last window-size
    /// bytes, and advances to BLOCKS.
    /// </summary>
    /// <exception cref="ZlibException">Thrown when not currently expecting a dictionary.</exception>
    internal int SetDictionary(byte[] dictionary)
    {
        int index = 0;
        int length = dictionary.Length;
        if (mode != InflateManagerMode.DICT0)
        {
            throw new ZlibException("Stream error.");
        }

        if (Adler.Adler32(1, dictionary, 0, dictionary.Length) != _codec._Adler32)
        {
            return ZlibConstants.Z_DATA_ERROR;
        }

        _codec._Adler32 = Adler.Adler32(0, null, 0, 0);

        // only the trailing window-size bytes of an oversized dictionary are useful
        if (length >= (1 << wbits))
        {
            length = (1 << wbits) - 1;
            index = dictionary.Length - length;
        }

        blocks.SetDictionary(dictionary, index, length);
        mode = InflateManagerMode.BLOCKS;
        return ZlibConstants.Z_OK;
    }

    /// <summary>
    /// Scans the input for the 00 00 FF FF empty-stored-block marker so decoding can resume
    /// after a data error. Returns Z_OK when the marker is found (state reset to BLOCKS),
    /// Z_BUF_ERROR when out of input, Z_DATA_ERROR when no marker was found.
    /// </summary>
    internal int Sync()
    {
        int n; // number of bytes to look at
        int p; // pointer to bytes
        int m; // number of marker bytes found in a row
        long r, w; // temporaries to save total_in and total_out

        // set up
        if (mode != InflateManagerMode.BAD)
        {
            mode = InflateManagerMode.BAD;
            marker = 0;
        }
        if ((n = _codec.AvailableBytesIn) == 0)
        {
            return ZlibConstants.Z_BUF_ERROR;
        }
        p = _codec.NextIn;
        m = marker;

        // search
        while (n != 0 && m < 4)
        {
            if (_codec.InputBuffer[p] == mark[m])
            {
                m++;
            }
            else if (_codec.InputBuffer[p] != 0)
            {
                m = 0;
            }
            else
            {
                // a zero can restart a partial match at the right offset
                m = 4 - m;
            }
            p++;
            n--;
        }

        // restore
        _codec.TotalBytesIn += p - _codec.NextIn;
        _codec.NextIn = p;
        _codec.AvailableBytesIn = n;
        marker = m;

        // return no joy or set up to restart on a new block
        if (m != 4)
        {
            return ZlibConstants.Z_DATA_ERROR;
        }
        // Reset() zeroes the totals; preserve them across the resync
        r = _codec.TotalBytesIn;
        w = _codec.TotalBytesOut;
        Reset();
        _codec.TotalBytesIn = r;
        _codec.TotalBytesOut = w;
        mode = InflateManagerMode.BLOCKS;
        return ZlibConstants.Z_OK;
    }

    // Returns true if inflate is currently at the end of a block generated
    // by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
    // implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
    // but removes the length bytes of the resulting empty stored block. When
    // decompressing, PPP checks that at the end of input packet, inflate is
    // waiting for these length bytes.
    /// <summary>
    /// Delegates the sync-point query to the block decoder. The <paramref name="z"/> parameter
    /// is unused here but kept for signature compatibility with the zlib API shape.
    /// </summary>
    internal int SyncPoint(ZlibCodec z)
    {
        return blocks.SyncPoint();
    }

    #region Nested type: InflateManagerMode

    // States of the zlib-wrapper inflate machine, in the order a well-formed stream visits them.
    private enum InflateManagerMode
    {
        METHOD = 0, // waiting for method byte
        FLAG = 1, // waiting for flag byte
        DICT4 = 2, // four dictionary check bytes to go
        DICT3 = 3, // three dictionary check bytes to go
        DICT2 = 4, // two dictionary check bytes to go
        DICT1 = 5, // one dictionary check byte to go
        DICT0 = 6, // waiting for inflateSetDictionary
        BLOCKS = 7, // decompressing blocks
        CHECK4 = 8, // four check bytes to go
        CHECK3 = 9, // three check bytes to go
        CHECK2 = 10, // two check bytes to go
        CHECK1 = 11, // one check byte to go
        DONE = 12, // finished check, done
        BAD = 13 // got an error--stay here
    }

    #endregion
}
}
// Tree.cs
// ------------------------------------------------------------------
//
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// All rights reserved.
//
// This code module is part of DotNetZip, a zipfile class library.
//
// ------------------------------------------------------------------
//
// This code is licensed under the Microsoft Public License.
// See the file License.txt for the license details.
// More info on: http://dotnetzip.codeplex.com
//
// ------------------------------------------------------------------
//
// last saved (in emacs):
// Time-stamp: <2009-October-28 13:29:50>
//
// ------------------------------------------------------------------
//
// This module defines classes for zlib compression and
// decompression. This code is derived from the jzlib implementation of
// zlib.
In keeping with the license for jzlib, the copyright to that +// code is below. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. 
+// +// ----------------------------------------------------------------------- + +using System; + +namespace SharpCompress.Compressors.Deflate +{ + internal sealed partial class DeflateManager + { + #region Nested type: Tree + + private sealed class Tree + { + internal const int Buf_size = 8 * 2; + private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1); + + internal static readonly sbyte[] bl_order = + { + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, + 14, + 1, 15 + }; + + // The lengths of the bit length codes are sent in order of decreasing + // probability, to avoid transmitting the lengths for unused bit + // length codes. + + // see definition of array dist_code below + //internal const int DIST_CODE_LEN = 512; + + private static readonly sbyte[] _dist_code = + { + 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, + 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, + 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, + 23, 23, + 
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29 + }; + + internal static readonly sbyte[] LengthCode = + { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15 + , 15, 15, + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17 + , 17, 17, + 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19 + , 19, 19, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 + , 20, 20, + 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21 + , 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22 + , 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23 + , 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24 + , 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24 + , 24, 24, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25 + , 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25 + , 25, 25, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26 + , 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26 + , 26, 26, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 
27, 27, 27, 27, 27 + , 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27 + , 27, 28 + }; + + internal static readonly int[] LengthBase = + { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, + 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0 + }; + + internal static readonly int[] DistanceBase = + { + 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, + 192, + 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144 + , 8192, 12288, 16384, 24576 + }; + + internal short[] dyn_tree; // the dynamic tree + internal int max_code; // largest code with non zero frequency + internal StaticTree staticTree; // the corresponding static tree + + /// + /// Map from a distance to a distance code. + /// + /// + /// No side effects. _dist_code[256] and _dist_code[257] are never used. + /// + internal static int DistanceCode(int dist) + { + return (dist < 256) + ? _dist_code[dist] + : _dist_code[256 + SharedUtils.URShift(dist, 7)]; + } + + // Compute the optimal bit lengths for a tree and update the total bit length + // for the current block. + // IN assertion: the fields freq and dad are set, heap[heap_max] and + // above are the tree nodes sorted by increasing frequency. + // OUT assertions: the field len is set to the optimal bit length, the + // array bl_count contains the frequencies for each bit length. + // The length opt_len is updated; static_len is also updated if stree is + // not null. 
+ internal void gen_bitlen(DeflateManager s) + { + short[] tree = dyn_tree; + short[] stree = staticTree.treeCodes; + int[] extra = staticTree.extraBits; + int base_Renamed = staticTree.extraBase; + int max_length = staticTree.maxLength; + int h; // heap index + int n, m; // iterate over the tree elements + int bits; // bit length + int xbits; // extra bits + short f; // frequency + int overflow = 0; // number of elements with bit length too large + + for (bits = 0; bits <= InternalConstants.MAX_BITS; bits++) + { + s.bl_count[bits] = 0; + } + + // In a first pass, compute the optimal bit lengths (which may + // overflow in the case of the bit length tree). + tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap + + for (h = s.heap_max + 1; h < HEAP_SIZE; h++) + { + n = s.heap[h]; + bits = tree[tree[n * 2 + 1] * 2 + 1] + 1; + if (bits > max_length) + { + bits = max_length; + overflow++; + } + tree[n * 2 + 1] = (short)bits; + + // We overwrite tree[n*2+1] which is no longer needed + + if (n > max_code) + { + continue; // not a leaf node + } + + s.bl_count[bits]++; + xbits = 0; + if (n >= base_Renamed) + { + xbits = extra[n - base_Renamed]; + } + f = tree[n * 2]; + s.opt_len += f * (bits + xbits); + if (stree != null) + { + s.static_len += f * (stree[n * 2 + 1] + xbits); + } + } + if (overflow == 0) + { + return; + } + + // This happens for example on obj2 and pic of the Calgary corpus + // Find the first bit length which could increase: + do + { + bits = max_length - 1; + while (s.bl_count[bits] == 0) + { + bits--; + } + s.bl_count[bits]--; // move one leaf down the tree + s.bl_count[bits + 1] = (short)(s.bl_count[bits + 1] + 2); // move one overflow item as its brother + s.bl_count[max_length]--; + + // The brother of the overflow item also moves one step up, + // but this does not affect bl_count[max_length] + overflow -= 2; + } + while (overflow > 0); + + for (bits = max_length; bits != 0; bits--) + { + n = s.bl_count[bits]; + while (n != 0) + { + m = 
s.heap[--h]; + if (m > max_code) + { + continue; + } + if (tree[m * 2 + 1] != bits) + { + s.opt_len = (int)(s.opt_len + (bits - (long)tree[m * 2 + 1]) * tree[m * 2]); + tree[m * 2 + 1] = (short)bits; + } + n--; + } + } + } + + // Construct one Huffman tree and assigns the code bit strings and lengths. + // Update the total bit length for the current block. + // IN assertion: the field freq is set for all tree elements. + // OUT assertions: the fields len and code are set to the optimal bit length + // and corresponding code. The length opt_len is updated; static_len is + // also updated if stree is not null. The field max_code is set. + internal void build_tree(DeflateManager s) + { + short[] tree = dyn_tree; + short[] stree = staticTree.treeCodes; + int elems = staticTree.elems; + int n, m; // iterate over heap elements + int max_code = -1; // largest code with non zero frequency + int node; // new node being created + + // Construct the initial heap, with least frequent element in + // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + // heap[0] is not used. + s.heap_len = 0; + s.heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) + { + if (tree[n * 2] != 0) + { + s.heap[++s.heap_len] = max_code = n; + s.depth[n] = 0; + } + else + { + tree[n * 2 + 1] = 0; + } + } + + // The pkzip format requires that at least one distance code exists, + // and that at least one bit should be sent even if there is only one + // possible code. So to avoid special checks later on we force at least + // two codes of non zero frequency. + while (s.heap_len < 2) + { + node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0); + tree[node * 2] = 1; + s.depth[node] = 0; + s.opt_len--; + if (stree != null) + { + s.static_len -= stree[node * 2 + 1]; + } + + // node is 0 or 1 so it does not have extra bits + } + this.max_code = max_code; + + // The elements heap[heap_len/2+1 .. 
heap_len] are leaves of the tree,
// establish sub-heaps of increasing lengths:

for (n = s.heap_len / 2; n >= 1; n--)
{
    s.pqdownheap(tree, n);
}

// Construct the Huffman tree by repeatedly combining the least two
// frequent nodes.

node = elems; // next internal node of the tree
do
{
    // n = node of least frequency
    n = s.heap[1];
    s.heap[1] = s.heap[s.heap_len--];
    s.pqdownheap(tree, 1);
    m = s.heap[1]; // m = node of next least frequency

    s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
    s.heap[--s.heap_max] = m;

    // Create a new node father of n and m
    tree[node * 2] = unchecked((short)(tree[n * 2] + tree[m * 2]));
    s.depth[node] = (sbyte)(Math.Max((byte)s.depth[n], (byte)s.depth[m]) + 1);
    tree[n * 2 + 1] = tree[m * 2 + 1] = (short)node;

    // and insert the new node in the heap
    s.heap[1] = node++;
    s.pqdownheap(tree, 1);
}
while (s.heap_len >= 2);

s.heap[--s.heap_max] = s.heap[1];

// At this point, the fields freq and dad are set. We can now
// generate the bit lengths.

gen_bitlen(s);

// The field len is now set, we can generate the bit codes
gen_codes(tree, max_code, s.bl_count);
}

// Generate the codes for a given tree and bit counts (which need not be
// optimal).
// IN assertion: the array bl_count contains the bit length statistics for
// the given tree and the field len is set for all tree elements.
// OUT assertion: the field code is set for all tree elements of non
// zero code length.
// NOTE(review): the body below restores code destroyed by text mangling in this
// patch (everything between "(1<" and ">>= 1" was stripped); restored from the
// canonical jzlib/DotNetZip implementation — verify against upstream Tree.cs.
internal static void gen_codes(short[] tree, int max_code, short[] bl_count)
{
    var next_code = new short[InternalConstants.MAX_BITS + 1]; // next code value for each bit length
    short code = 0; // running code value
    int bits; // bit index
    int n; // code index

    // The distribution counts are first used to generate the code values
    // without bit reversal.
    for (bits = 1; bits <= InternalConstants.MAX_BITS; bits++)
    {
        unchecked
        {
            next_code[bits] = code = (short)((code + bl_count[bits - 1]) << 1);
        }
    }

    // Check that the bit counts in bl_count are consistent. The last code
    // must be all ones.
    //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, "inconsistent bit counts");

    for (n = 0; n <= max_code; n++)
    {
        int len = tree[n * 2 + 1];
        if (len == 0)
        {
            continue; // zero-length code: symbol unused
        }

        // Now reverse the bits: DEFLATE transmits codes LSB-first
        tree[n * 2] = unchecked((short)bi_reverse(next_code[len]++, len));
    }
}

// Reverse the first len bits of a code, using straightforward code.
// IN assertion: 1 <= len <= 15
internal static int bi_reverse(int code, int len)
{
    int res = 0;
    do
    {
        res |= code & 1;
        code >>= 1; //SharedUtils.URShift(code, 1);
        res <<= 1;
    }
    while (--len > 0);
    return res >> 1;
}
}

#endregion
}
}
// Zlib.cs
// ------------------------------------------------------------------
//
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// All rights reserved.
//
// This code module is part of DotNetZip, a zipfile class library.
//
// ------------------------------------------------------------------
//
// This code is licensed under the Microsoft Public License.
// See the file License.txt for the license details.
// More info on: http://dotnetzip.codeplex.com
//
// ------------------------------------------------------------------
//
// last saved (in emacs):
// Time-stamp: <2009-November-07 05:26:55>
//
// ------------------------------------------------------------------
//
// This module defines classes for ZLIB compression and
// decompression. This code is derived from the jzlib implementation of
// zlib, but significantly modified. The object model is not the same,
// and many of the behaviors are new or different. Nonetheless, in
// keeping with the license for jzlib, the copyright to that code is
// included below.
//
// ------------------------------------------------------------------
//
// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + +using System; +using System.IO; + +namespace SharpCompress.Compressors.Deflate +{ + /// + /// The compression level to be used when using a DeflateStream or ZlibStream with CompressionMode.Compress. 
+ /// + public enum CompressionLevel + { + /// + /// None means that the data will be simply stored, with no change at all. + /// If you are producing ZIPs for use on Mac OSX, be aware that archives produced with CompressionLevel.None + /// cannot be opened with the default zip reader. Use a different CompressionLevel. + /// + None = 0, + + /// + /// Same as None. + /// + Level0 = 0, + + /// + /// The fastest but least effective compression. + /// + BestSpeed = 1, + + /// + /// A synonym for BestSpeed. + /// + Level1 = 1, + + /// + /// A little slower, but better, than level 1. + /// + Level2 = 2, + + /// + /// A little slower, but better, than level 2. + /// + Level3 = 3, + + /// + /// A little slower, but better, than level 3. + /// + Level4 = 4, + + /// + /// A little slower than level 4, but with better compression. + /// + Level5 = 5, + + /// + /// The default compression level, with a good balance of speed and compression efficiency. + /// + Default = 6, + + /// + /// A synonym for Default. + /// + Level6 = 6, + + /// + /// Pretty good compression! + /// + Level7 = 7, + + /// + /// Better compression than Level7! + /// + Level8 = 8, + + /// + /// The "best" compression, where best means greatest reduction in size of the input data stream. + /// This is also the slowest compression. + /// + BestCompression = 9, + + /// + /// A synonym for BestCompression. + /// + Level9 = 9 + } + + /// + /// Describes options for how the compression algorithm is executed. Different strategies + /// work better on different sorts of data. The strategy parameter can affect the compression + /// ratio and the speed of compression but not the correctness of the compresssion. + /// + public enum CompressionStrategy + { + /// + /// The default strategy is probably the best for normal data. + /// + Default = 0, + + /// + /// The Filtered strategy is intended to be used most effectively with data produced by a + /// filter or predictor. 
By this definition, filtered data consists mostly of small + /// values with a somewhat random distribution. In this case, the compression algorithm + /// is tuned to compress them better. The effect of Filtered is to force more Huffman + /// coding and less string matching; it is a half-step between Default and HuffmanOnly. + /// + Filtered = 1, + + /// + /// Using HuffmanOnly will force the compressor to do Huffman encoding only, with no + /// string matching. + /// + HuffmanOnly = 2 + } + + /// + /// A general purpose exception class for exceptions in the Zlib library. + /// + public class ZlibException : Exception + { + /// + /// The ZlibException class captures exception information generated + /// by the Zlib library. + /// + public ZlibException() + { + } + + /// + /// This ctor collects a message attached to the exception. + /// + /// + public ZlibException(String s) + : base(s) + { + } + } + + internal class SharedUtils + { + /// + /// Performs an unsigned bitwise right shift with the specified number + /// + /// Number to operate on + /// Ammount of bits to shift + /// The resulting number from the shift operation + public static int URShift(int number, int bits) + { + return (int)((uint)number >> bits); + } + +#if NOT + +/// +/// Performs an unsigned bitwise right shift with the specified number +/// +/// Number to operate on +/// Ammount of bits to shift +/// The resulting number from the shift operation + public static long URShift(long number, int bits) + { + return (long) ((UInt64)number >> bits); + } +#endif + + /// + /// Reads a number of characters from the current source TextReader and writes + /// the data to the target array at the specified index. + /// + /// + /// The source TextReader to read from + /// Contains the array of characteres read from the source TextReader. + /// The starting index of the target array. + /// The maximum number of characters to read from the source TextReader. + /// + /// + /// The number of characters read. 
The number will be less than or equal to + /// count depending on the data available in the source TextReader. Returns -1 + /// if the end of the stream is reached. + /// + public static Int32 ReadInput(TextReader sourceTextReader, byte[] target, int start, int count) + { + // Returns 0 bytes if not enough space in target + if (target.Length == 0) + { + return 0; + } + + char[] charArray = new char[target.Length]; + int bytesRead = sourceTextReader.Read(charArray, start, count); + + // Returns -1 if EOF + if (bytesRead == 0) + { + return -1; + } + + for (int index = start; index < start + bytesRead; index++) + { + target[index] = (byte)charArray[index]; + } + + return bytesRead; + } + } + + internal static class InternalConstants + { + internal static readonly int MAX_BITS = 15; + internal static readonly int BL_CODES = 19; + internal static readonly int D_CODES = 30; + internal static readonly int LITERALS = 256; + internal static readonly int LENGTH_CODES = 29; + internal static readonly int L_CODES = (LITERALS + 1 + LENGTH_CODES); + + // Bit length codes must not exceed MAX_BL_BITS bits + internal static readonly int MAX_BL_BITS = 7; + + // repeat previous bit length 3-6 times (2 bits of repeat count) + internal static readonly int REP_3_6 = 16; + + // repeat a zero length 3-10 times (3 bits of repeat count) + internal static readonly int REPZ_3_10 = 17; + + // repeat a zero length 11-138 times (7 bits of repeat count) + internal static readonly int REPZ_11_138 = 18; + } + + internal sealed class StaticTree + { + internal static readonly short[] lengthAndLiteralsTreeCodes = + { + 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, + 8, 108, 8, 236, 8, + 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, + 8, 124, 8, 252, 8, + 2, 8, 130, 8, 66, 8, 194, 8, 34, 8, 162, 8 + , 98, 8, 226, 8, + 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, + 8, 114, 8, 242, 8, + 10, 8, 138, 8, 74, 8, 202, 8, 42, 8, 170, + 8, 106, 8, 234, 8, + 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, + 8, 122, 8, 250, 8, + 
6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8 + , 102, 8, 230, 8, + 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, + 8, 118, 8, 246, 8, + 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, + 8, 110, 8, 238, 8, + 30, 8, 158, 8, 94, 8, 222, 8, 62, 8, 190, + 8, 126, 8, 254, 8, + 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8 + , 97, 8, 225, 8, + 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, + 8, 113, 8, 241, 8, + 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8 + , 105, 8, 233, 8, + 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, + 8, 121, 8, 249, 8, + 5, 8, 133, 8, 69, 8, 197, 8, 37, 8, 165, 8 + , 101, 8, 229, 8, + 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, + 8, 117, 8, 245, 8, + 13, 8, 141, 8, 77, 8, 205, 8, 45, 8, 173, + 8, 109, 8, 237, 8, + 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, + 8, 125, 8, 253, 8, + 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, + 9, 211, 9, 467, 9, + 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371 + , 9, 243, 9, 499, 9, + 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, + 9, 203, 9, 459, 9, + 43, 9, 299, 9, 171, 9, 427, 9, 107, 9, 363 + , 9, 235, 9, 491, 9, + 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, + 9, 219, 9, 475, 9, + 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379 + , 9, 251, 9, 507, 9, + 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, + 9, 199, 9, 455, 9, + 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359 + , 9, 231, 9, 487, 9, + 23, 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, + 9, 215, 9, 471, 9, + 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375 + , 9, 247, 9, 503, 9, + 15, 9, 271, 9, 143, 9, 399, 9, 79, 9, 335, + 9, 207, 9, 463, 9, + 47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367 + , 9, 239, 9, 495, 9, + 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, + 9, 223, 9, 479, 9, + 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383 + , 9, 255, 9, 511, 9, + 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, + 48, 7, 112, 7, + 8, 7, 72, 7, 40, 7, 104, 7, 24, 7, 88, 7, + 56, 7, 120, 7, + 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, + 52, 7, 116, 7, + 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8 + , 99, 8, 227, 8 + }; + + internal static readonly 
short[] distTreeCodes = + { + 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5, + 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5, + 1, 5, 17, 5, 9, 5, 25, 5, 5, 5, 21, 5, 13, 5, 29, 5, + 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 + }; + + // extra bits for each bit length code + internal static readonly int[] extra_blbits = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7}; + + internal static readonly StaticTree Literals; + internal static readonly StaticTree Distances; + internal static readonly StaticTree BitLengths; + + internal short[] treeCodes; // static tree or null + internal int[] extraBits; // extra bits for each code or null + internal int extraBase; // base index for extra_bits + internal int elems; // max number of elements in the tree + internal int maxLength; // max bit length for the codes + + private StaticTree(short[] treeCodes, int[] extraBits, int extraBase, int elems, int maxLength) + { + this.treeCodes = treeCodes; + this.extraBits = extraBits; + this.extraBase = extraBase; + this.elems = elems; + this.maxLength = maxLength; + } + + static StaticTree() + { + Literals = new StaticTree(lengthAndLiteralsTreeCodes, DeflateManager.ExtraLengthBits, + InternalConstants.LITERALS + 1, InternalConstants.L_CODES, + InternalConstants.MAX_BITS); + Distances = new StaticTree(distTreeCodes, DeflateManager.ExtraDistanceBits, 0, InternalConstants.D_CODES, + InternalConstants.MAX_BITS); + BitLengths = new StaticTree(null, extra_blbits, 0, InternalConstants.BL_CODES, InternalConstants.MAX_BL_BITS); + } + } + + /// + /// Computes an Adler-32 checksum. + /// + /// + /// The Adler checksum is similar to a CRC checksum, but faster to compute, though less + /// reliable. It is used in producing RFC1950 compressed streams. The Adler checksum + /// is a required part of the "ZLIB" standard. Applications will almost never need to + /// use this class directly. 
+ /// + internal sealed class Adler + { + // largest prime smaller than 65536 + private static readonly uint BASE = 65521U; + + // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 + private static readonly int NMAX = 5552; + + internal static uint Adler32(uint adler, byte[] buf, int index, int len) + { + if (buf == null) + { + return 1; + } + + uint s1 = adler & 0xffffU; + uint s2 = (adler >> 16) & 0xffffU; + + while (len > 0) + { + int k = len < NMAX ? len : NMAX; + len -= k; + while (k >= 16) + { + //s1 += (buf[index++] & 0xff); s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + s1 += buf[index++]; + s2 += s1; + k -= 16; + } + if (k != 0) + { + do + { + s1 += buf[index++]; + s2 += s1; + } + while (--k != 0); + } + s1 %= BASE; + s2 %= BASE; + } + return (s2 << 16) | s1; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibBaseStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibBaseStream.cs new file mode 100644 index 0000000000..e499eeb711 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibBaseStream.cs @@ -0,0 +1,650 @@ +// ZlibBaseStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. 
+// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-October-28 15:45:15> +// +// ------------------------------------------------------------------ +// +// This module defines the ZlibBaseStream class, which is an intnernal +// base class for DeflateStream, ZlibStream and GZipStream. +// +// ------------------------------------------------------------------ + +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common; +using SharpCompress.Common.Tar.Headers; +using SharpCompress.Converters; +using System.Text; + +namespace SharpCompress.Compressors.Deflate +{ + internal enum ZlibStreamFlavor + { + ZLIB = 1950, + DEFLATE = 1951, + GZIP = 1952 + } + + internal class ZlibBaseStream : Stream + { + protected internal ZlibCodec _z; // deferred init... 
new ZlibCodec(); + + protected internal StreamMode _streamMode = StreamMode.Undefined; + protected internal FlushType _flushMode; + protected internal ZlibStreamFlavor _flavor; + protected internal CompressionMode _compressionMode; + protected internal CompressionLevel _level; + protected internal byte[] _workingBuffer; + protected internal int _bufferSize = ZlibConstants.WorkingBufferSizeDefault; + protected internal byte[] _buf1 = new byte[1]; + + protected internal Stream _stream; + protected internal CompressionStrategy Strategy = CompressionStrategy.Default; + + // workitem 7159 + private readonly CRC32 crc; + protected internal string _GzipFileName; + protected internal string _GzipComment; + protected internal DateTime _GzipMtime; + protected internal int _gzipHeaderByteCount; + + private readonly Encoding _encoding; + + internal int Crc32 + { + get + { + if (crc == null) + { + return 0; + } + return crc.Crc32Result; + } + } + + public ZlibBaseStream(Stream stream, + CompressionMode compressionMode, + CompressionLevel level, + ZlibStreamFlavor flavor, + Encoding encoding) + { + _flushMode = FlushType.None; + + //this._workingBuffer = new byte[WORKING_BUFFER_SIZE_DEFAULT]; + _stream = stream; + _compressionMode = compressionMode; + _flavor = flavor; + _level = level; + + _encoding = encoding; + + // workitem 7159 + if (flavor == ZlibStreamFlavor.GZIP) + { + crc = new CRC32(); + } + } + + protected internal bool _wantCompress => (_compressionMode == CompressionMode.Compress); + + private ZlibCodec z + { + get + { + if (_z == null) + { + bool wantRfc1950Header = (_flavor == ZlibStreamFlavor.ZLIB); + _z = new ZlibCodec(); + if (_compressionMode == CompressionMode.Decompress) + { + _z.InitializeInflate(wantRfc1950Header); + } + else + { + _z.Strategy = Strategy; + _z.InitializeDeflate(_level, wantRfc1950Header); + } + } + return _z; + } + } + + private byte[] workingBuffer + { + get + { + if (_workingBuffer == null) + { + _workingBuffer = new byte[_bufferSize]; + 
} + return _workingBuffer; + } + } + + public override void Write(Byte[] buffer, int offset, int count) + { + // workitem 7159 + // calculate the CRC on the unccompressed data (before writing) + if (crc != null) + { + crc.SlurpBlock(buffer, offset, count); + } + + if (_streamMode == StreamMode.Undefined) + { + _streamMode = StreamMode.Writer; + } + else if (_streamMode != StreamMode.Writer) + { + throw new ZlibException("Cannot Write after Reading."); + } + + if (count == 0) + { + return; + } + + // first reference of z property will initialize the private var _z + z.InputBuffer = buffer; + _z.NextIn = offset; + _z.AvailableBytesIn = count; + bool done = false; + do + { + _z.OutputBuffer = workingBuffer; + _z.NextOut = 0; + _z.AvailableBytesOut = _workingBuffer.Length; + int rc = (_wantCompress) + ? _z.Deflate(_flushMode) + : _z.Inflate(_flushMode); + if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + { + throw new ZlibException((_wantCompress ? "de" : "in") + "flating: " + _z.Message); + } + + //if (_workingBuffer.Length - _z.AvailableBytesOut > 0) + _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut); + + done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0; + + // If GZIP and de-compress, we're done when 8 bytes remain. + if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress) + { + done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0); + } + } + while (!done); + } + + private void finish() + { + if (_z == null) + { + return; + } + + if (_streamMode == StreamMode.Writer) + { + bool done = false; + do + { + _z.OutputBuffer = workingBuffer; + _z.NextOut = 0; + _z.AvailableBytesOut = _workingBuffer.Length; + int rc = (_wantCompress) + ? _z.Deflate(FlushType.Finish) + : _z.Inflate(FlushType.Finish); + + if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) + { + string verb = (_wantCompress ? 
"de" : "in") + "flating"; + if (_z.Message == null) + { + throw new ZlibException(String.Format("{0}: (rc = {1})", verb, rc)); + } + throw new ZlibException(verb + ": " + _z.Message); + } + + if (_workingBuffer.Length - _z.AvailableBytesOut > 0) + { + _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut); + } + + done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0; + + // If GZIP and de-compress, we're done when 8 bytes remain. + if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress) + { + done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0); + } + } + while (!done); + + Flush(); + + // workitem 7159 + if (_flavor == ZlibStreamFlavor.GZIP) + { + if (_wantCompress) + { + // Emit the GZIP trailer: CRC32 and size mod 2^32 + int c1 = crc.Crc32Result; + _stream.Write(DataConverter.LittleEndian.GetBytes(c1), 0, 4); + int c2 = (Int32)(crc.TotalBytesRead & 0x00000000FFFFFFFF); + _stream.Write(DataConverter.LittleEndian.GetBytes(c2), 0, 4); + } + else + { + throw new ZlibException("Writing with decompression is not supported."); + } + } + } + + // workitem 7159 + else if (_streamMode == StreamMode.Reader) + { + if (_flavor == ZlibStreamFlavor.GZIP) + { + if (!_wantCompress) + { + // workitem 8501: handle edge case (decompress empty stream) + if (_z.TotalBytesOut == 0L) + { + return; + } + + // Read and potentially verify the GZIP trailer: CRC32 and size mod 2^32 + byte[] trailer = new byte[8]; + + // workitem 8679 + if (_z.AvailableBytesIn != 8) + { + // Make sure we have read to the end of the stream + Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, _z.AvailableBytesIn); + int bytesNeeded = 8 - _z.AvailableBytesIn; + int bytesRead = _stream.Read(trailer, + _z.AvailableBytesIn, + bytesNeeded); + if (bytesNeeded != bytesRead) + { + throw new ZlibException(String.Format( + "Protocol error. 
AvailableBytesIn={0}, expected 8", + _z.AvailableBytesIn + bytesRead)); + } + } + else + { + Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, trailer.Length); + } + + Int32 crc32_expected = DataConverter.LittleEndian.GetInt32(trailer, 0); + Int32 crc32_actual = crc.Crc32Result; + Int32 isize_expected = DataConverter.LittleEndian.GetInt32(trailer, 4); + Int32 isize_actual = (Int32)(_z.TotalBytesOut & 0x00000000FFFFFFFF); + + if (crc32_actual != crc32_expected) + { + throw new ZlibException( + String.Format("Bad CRC32 in GZIP stream. (actual({0:X8})!=expected({1:X8}))", + crc32_actual, crc32_expected)); + } + + if (isize_actual != isize_expected) + { + throw new ZlibException( + String.Format("Bad size in GZIP stream. (actual({0})!=expected({1}))", isize_actual, + isize_expected)); + } + } + else + { + throw new ZlibException("Reading with compression is not supported."); + } + } + } + } + + private void end() + { + if (z == null) + { + return; + } + if (_wantCompress) + { + _z.EndDeflate(); + } + else + { + _z.EndInflate(); + } + _z = null; + } + + protected override void Dispose(bool disposing) + { + if (isDisposed) + { + return; + } + isDisposed = true; + base.Dispose(disposing); + if (disposing) + { + if (_stream == null) + { + return; + } + try + { + finish(); + } + finally + { + end(); + _stream?.Dispose(); + _stream = null; + } + } + } + + public override void Flush() + { + _stream.Flush(); + } + + public override Int64 Seek(Int64 offset, SeekOrigin origin) + { + throw new NotSupportedException(); + + //_outStream.Seek(offset, origin); + } + + public override void SetLength(Int64 value) + { + _stream.SetLength(value); + } + +#if NOT + public int Read() + { + if (Read(_buf1, 0, 1) == 0) + return 0; + // calculate CRC after reading + if (crc!=null) + crc.SlurpBlock(_buf1,0,1); + return (_buf1[0] & 0xFF); + } +#endif + + private bool nomoreinput; + private bool isDisposed; + + private string ReadZeroTerminatedString() + { + var list = new List(); + bool done = 
false; + do + { + // workitem 7740 + int n = _stream.Read(_buf1, 0, 1); + if (n != 1) + { + throw new ZlibException("Unexpected EOF reading GZIP header."); + } + if (_buf1[0] == 0) + { + done = true; + } + else + { + list.Add(_buf1[0]); + } + } + while (!done); + byte[] buffer = list.ToArray(); + return _encoding.GetString(buffer, 0, buffer.Length); + } + + private int _ReadAndValidateGzipHeader() + { + int totalBytesRead = 0; + + // read the header on the first read + byte[] header = new byte[10]; + int n = _stream.Read(header, 0, header.Length); + + // workitem 8501: handle edge case (decompress empty stream) + if (n == 0) + { + return 0; + } + + if (n != 10) + { + throw new ZlibException("Not a valid GZIP stream."); + } + + if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8) + { + throw new ZlibException("Bad GZIP header."); + } + + Int32 timet = DataConverter.LittleEndian.GetInt32(header, 4); + _GzipMtime = TarHeader.EPOCH.AddSeconds(timet); + totalBytesRead += n; + if ((header[3] & 0x04) == 0x04) + { + // read and discard extra field + n = _stream.Read(header, 0, 2); // 2-byte length field + totalBytesRead += n; + + Int16 extraLength = (Int16)(header[0] + header[1] * 256); + byte[] extra = new byte[extraLength]; + n = _stream.Read(extra, 0, extra.Length); + if (n != extraLength) + { + throw new ZlibException("Unexpected end-of-file reading GZIP header."); + } + totalBytesRead += n; + } + if ((header[3] & 0x08) == 0x08) + { + _GzipFileName = ReadZeroTerminatedString(); + } + if ((header[3] & 0x10) == 0x010) + { + _GzipComment = ReadZeroTerminatedString(); + } + if ((header[3] & 0x02) == 0x02) + { + Read(_buf1, 0, 1); // CRC16, ignore + } + + return totalBytesRead; + } + + public override Int32 Read(Byte[] buffer, Int32 offset, Int32 count) + { + // According to MS documentation, any implementation of the IO.Stream.Read function must: + // (a) throw an exception if offset & count reference an invalid part of the buffer, + // or if count < 0, or if 
buffer is null + // (b) return 0 only upon EOF, or if count = 0 + // (c) if not EOF, then return at least 1 byte, up to bytes + + if (_streamMode == StreamMode.Undefined) + { + if (!_stream.CanRead) + { + throw new ZlibException("The stream is not readable."); + } + + // for the first read, set up some controls. + _streamMode = StreamMode.Reader; + + // (The first reference to _z goes through the private accessor which + // may initialize it.) + z.AvailableBytesIn = 0; + if (_flavor == ZlibStreamFlavor.GZIP) + { + _gzipHeaderByteCount = _ReadAndValidateGzipHeader(); + + // workitem 8501: handle edge case (decompress empty stream) + if (_gzipHeaderByteCount == 0) + { + return 0; + } + } + } + + if (_streamMode != StreamMode.Reader) + { + throw new ZlibException("Cannot Read after Writing."); + } + + if (count == 0) + { + return 0; + } + if (nomoreinput && _wantCompress) + { + return 0; // workitem 8557 + } + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + if (offset < buffer.GetLowerBound(0)) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + if ((offset + count) > buffer.GetLength(0)) + { + throw new ArgumentOutOfRangeException(nameof(count)); + } + + int rc = 0; + + // set up the output of the deflate/inflate codec: + _z.OutputBuffer = buffer; + _z.NextOut = offset; + _z.AvailableBytesOut = count; + + // This is necessary in case _workingBuffer has been resized. (new byte[]) + // (The first reference to _workingBuffer goes through the private accessor which + // may initialize it.) + _z.InputBuffer = workingBuffer; + + do + { + // need data in _workingBuffer in order to deflate/inflate. Here, we check if we have any. + if ((_z.AvailableBytesIn == 0) && (!nomoreinput)) + { + // No data available, so try to Read data from the captive stream. 
+ _z.NextIn = 0; + _z.AvailableBytesIn = _stream.Read(_workingBuffer, 0, _workingBuffer.Length); + if (_z.AvailableBytesIn == 0) + { + nomoreinput = true; + } + } + + // we have data in InputBuffer; now compress or decompress as appropriate + rc = (_wantCompress) + ? _z.Deflate(_flushMode) + : _z.Inflate(_flushMode); + + if (nomoreinput && (rc == ZlibConstants.Z_BUF_ERROR)) + { + return 0; + } + + if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + { + throw new ZlibException(String.Format("{0}flating: rc={1} msg={2}", (_wantCompress ? "de" : "in"), + rc, _z.Message)); + } + + if ((nomoreinput || rc == ZlibConstants.Z_STREAM_END) && (_z.AvailableBytesOut == count)) + { + break; // nothing more to read + } + } //while (_z.AvailableBytesOut == count && rc == ZlibConstants.Z_OK); + while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK); + + // workitem 8557 + // is there more room in output? + if (_z.AvailableBytesOut > 0) + { + if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0) + { + // deferred + } + + // are we completely done reading? + if (nomoreinput) + { + // and in compression? 
+ if (_wantCompress) + { + // no more input data available; therefore we flush to + // try to complete the read + rc = _z.Deflate(FlushType.Finish); + + if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + { + throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message)); + } + } + } + } + + rc = (count - _z.AvailableBytesOut); + + // calculate CRC after reading + if (crc != null) + { + crc.SlurpBlock(buffer, offset, rc); + } + + return rc; + } + + public override Boolean CanRead => _stream.CanRead; + + public override Boolean CanSeek => _stream.CanSeek; + + public override Boolean CanWrite => _stream.CanWrite; + + public override Int64 Length => _stream.Length; + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + internal enum StreamMode + { + Writer, + Reader, + Undefined + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibCodec.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibCodec.cs new file mode 100644 index 0000000000..f2e9339b6e --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibCodec.cs @@ -0,0 +1,746 @@ +// ZlibCodec.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. 
+// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-November-03 15:40:51> +// +// ------------------------------------------------------------------ +// +// This module defines a Codec for ZLIB compression and +// decompression. This code extends code that was based the jzlib +// implementation of zlib, but this code is completely novel. The codec +// class is new, and encapsulates some behaviors that are new, and some +// that were present in other classes in the jzlib code base. In +// keeping with the license for jzlib, the copyright to the jzlib code +// is included below. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + +using System; + +namespace SharpCompress.Compressors.Deflate +{ + /// + /// Encoder and Decoder for ZLIB and DEFLATE (IETF RFC1950 and RFC1951). + /// + /// + /// + /// This class compresses and decompresses data according to the Deflate algorithm + /// and optionally, the ZLIB format, as documented in RFC 1950 - ZLIB and RFC 1951 - DEFLATE. + /// + internal sealed class ZlibCodec + { + /// + /// The buffer from which data is taken. + /// + public byte[] InputBuffer; + + /// + /// An index into the InputBuffer array, indicating where to start reading. + /// + public int NextIn; + + /// + /// The number of bytes available in the InputBuffer, starting at NextIn. + /// + /// + /// Generally you should set this to InputBuffer.Length before the first Inflate() or Deflate() call. + /// The class will update this number as calls to Inflate/Deflate are made. + /// + public int AvailableBytesIn; + + /// + /// Total number of bytes read so far, through all calls to Inflate()/Deflate(). + /// + public long TotalBytesIn; + + /// + /// Buffer to store output data. 
+ /// + public byte[] OutputBuffer; + + /// + /// An index into the OutputBuffer array, indicating where to start writing. + /// + public int NextOut; + + /// + /// The number of bytes available in the OutputBuffer, starting at NextOut. + /// + /// + /// Generally you should set this to OutputBuffer.Length before the first Inflate() or Deflate() call. + /// The class will update this number as calls to Inflate/Deflate are made. + /// + public int AvailableBytesOut; + + /// + /// Total number of bytes written to the output so far, through all calls to Inflate()/Deflate(). + /// + public long TotalBytesOut; + + /// + /// used for diagnostics, when something goes wrong! + /// + public String Message; + + internal DeflateManager dstate; + internal InflateManager istate; + + internal uint _Adler32; + + /// + /// The compression level to use in this codec. Useful only in compression mode. + /// + public CompressionLevel CompressLevel = CompressionLevel.Default; + + /// + /// The number of Window Bits to use. + /// + /// + /// This gauges the size of the sliding window, and hence the + /// compression effectiveness as well as memory consumption. It's best to just leave this + /// setting alone if you don't know what it is. The maximum value is 15 bits, which implies + /// a 32k window. + /// + public int WindowBits = ZlibConstants.WindowBitsDefault; + + /// + /// The compression strategy to use. + /// + /// + /// This is only effective in compression. The theory offered by ZLIB is that different + /// strategies could potentially produce significant differences in compression behavior + /// for different data sets. Unfortunately I don't have any good recommendations for how + /// to set it differently. When I tested changing the strategy I got minimally different + /// compression performance. It's best to leave this property alone if you don't have a + /// good feel for it. 
Or, you may want to produce a test harness that runs through the + /// different strategy options and evaluates them on different file types. If you do that, + /// let me know your results. + /// + public CompressionStrategy Strategy = CompressionStrategy.Default; + + /// + /// The Adler32 checksum on the data transferred through the codec so far. You probably don't need to look at this. + /// + public int Adler32 => (int)_Adler32; + + /// + /// Create a ZlibCodec. + /// + /// + /// If you use this default constructor, you will later have to explicitly call + /// InitializeInflate() or InitializeDeflate() before using the ZlibCodec to compress + /// or decompress. + /// + public ZlibCodec() + { + } + + /// + /// Create a ZlibCodec that either compresses or decompresses. + /// + /// + /// Indicates whether the codec should compress (deflate) or decompress (inflate). + /// + public ZlibCodec(CompressionMode mode) + { + if (mode == CompressionMode.Compress) + { + int rc = InitializeDeflate(); + if (rc != ZlibConstants.Z_OK) + { + throw new ZlibException("Cannot initialize for deflate."); + } + } + else if (mode == CompressionMode.Decompress) + { + int rc = InitializeInflate(); + if (rc != ZlibConstants.Z_OK) + { + throw new ZlibException("Cannot initialize for inflate."); + } + } + else + { + throw new ZlibException("Invalid ZlibStreamFlavor."); + } + } + + /// + /// Initialize the inflation state. + /// + /// + /// It is not necessary to call this before using the ZlibCodec to inflate data; + /// It is implicitly called when you call the constructor. + /// + /// Z_OK if everything goes well. + public int InitializeInflate() + { + return InitializeInflate(WindowBits); + } + + /// + /// Initialize the inflation state with an explicit flag to + /// govern the handling of RFC1950 header bytes. + /// + /// + /// + /// By default, the ZLIB header defined in RFC 1950 is expected. If + /// you want to read a zlib stream you should specify true for + /// expectRfc1950Header. 
If you have a deflate stream, you will want to specify + /// false. It is only necessary to invoke this initializer explicitly if you + /// want to specify false. + /// + /// + /// whether to expect an RFC1950 header byte + /// pair when reading the stream of data to be inflated. + /// + /// Z_OK if everything goes well. + public int InitializeInflate(bool expectRfc1950Header) + { + return InitializeInflate(WindowBits, expectRfc1950Header); + } + + /// + /// Initialize the ZlibCodec for inflation, with the specified number of window bits. + /// + /// The number of window bits to use. If you need to ask what that is, + /// then you shouldn't be calling this initializer. + /// Z_OK if all goes well. + public int InitializeInflate(int windowBits) + { + WindowBits = windowBits; + return InitializeInflate(windowBits, true); + } + + /// + /// Initialize the inflation state with an explicit flag to govern the handling of + /// RFC1950 header bytes. + /// + /// + /// + /// If you want to read a zlib stream you should specify true for + /// expectRfc1950Header. In this case, the library will expect to find a ZLIB + /// header, as defined in RFC + /// 1950, in the compressed stream. If you will be reading a DEFLATE or + /// GZIP stream, which does not have such a header, you will want to specify + /// false. + /// + /// + /// whether to expect an RFC1950 header byte pair when reading + /// the stream of data to be inflated. + /// The number of window bits to use. If you need to ask what that is, + /// then you shouldn't be calling this initializer. + /// Z_OK if everything goes well. 
+ public int InitializeInflate(int windowBits, bool expectRfc1950Header) + { + WindowBits = windowBits; + if (dstate != null) + { + throw new ZlibException("You may not call InitializeInflate() after calling InitializeDeflate()."); + } + istate = new InflateManager(expectRfc1950Header); + return istate.Initialize(this, windowBits); + } + + /// + /// Inflate the data in the InputBuffer, placing the result in the OutputBuffer. + /// + /// + /// You must have set InputBuffer and OutputBuffer, NextIn and NextOut, and AvailableBytesIn and + /// AvailableBytesOut before calling this method. + /// + /// + /// + /// private void InflateBuffer() + /// { + /// int bufferSize = 1024; + /// byte[] buffer = new byte[bufferSize]; + /// ZlibCodec decompressor = new ZlibCodec(); + /// + /// Console.WriteLine("\n============================================"); + /// Console.WriteLine("Size of Buffer to Inflate: {0} bytes.", CompressedBytes.Length); + /// MemoryStream ms = new MemoryStream(DecompressedBytes); + /// + /// int rc = decompressor.InitializeInflate(); + /// + /// decompressor.InputBuffer = CompressedBytes; + /// decompressor.NextIn = 0; + /// decompressor.AvailableBytesIn = CompressedBytes.Length; + /// + /// decompressor.OutputBuffer = buffer; + /// + /// // pass 1: inflate + /// do + /// { + /// decompressor.NextOut = 0; + /// decompressor.AvailableBytesOut = buffer.Length; + /// rc = decompressor.Inflate(FlushType.None); + /// + /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + /// throw new Exception("inflating: " + decompressor.Message); + /// + /// ms.Write(decompressor.OutputBuffer, 0, buffer.Length - decompressor.AvailableBytesOut); + /// } + /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0); + /// + /// // pass 2: finish and flush + /// do + /// { + /// decompressor.NextOut = 0; + /// decompressor.AvailableBytesOut = buffer.Length; + /// rc = decompressor.Inflate(FlushType.Finish); + /// + /// if (rc != 
ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) + /// throw new Exception("inflating: " + decompressor.Message); + /// + /// if (buffer.Length - decompressor.AvailableBytesOut > 0) + /// ms.Write(buffer, 0, buffer.Length - decompressor.AvailableBytesOut); + /// } + /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0); + /// + /// decompressor.EndInflate(); + /// } + /// + /// + /// + /// The flush to use when inflating. + /// Z_OK if everything goes well. + public int Inflate(FlushType flush) + { + if (istate == null) + { + throw new ZlibException("No Inflate State!"); + } + return istate.Inflate(flush); + } + + /// + /// Ends an inflation session. + /// + /// + /// Call this after successively calling Inflate(). This will cause all buffers to be flushed. + /// After calling this you cannot call Inflate() without a intervening call to one of the + /// InitializeInflate() overloads. + /// + /// Z_OK if everything goes well. + public int EndInflate() + { + if (istate == null) + { + throw new ZlibException("No Inflate State!"); + } + int ret = istate.End(); + istate = null; + return ret; + } + + /// + /// I don't know what this does! + /// + /// Z_OK if everything goes well. + public int SyncInflate() + { + if (istate == null) + { + throw new ZlibException("No Inflate State!"); + } + return istate.Sync(); + } + + /// + /// Initialize the ZlibCodec for deflation operation. + /// + /// + /// The codec will use the MAX window bits and the default level of compression. 
+ /// + /// + /// + /// int bufferSize = 40000; + /// byte[] CompressedBytes = new byte[bufferSize]; + /// byte[] DecompressedBytes = new byte[bufferSize]; + /// + /// ZlibCodec compressor = new ZlibCodec(); + /// + /// compressor.InitializeDeflate(CompressionLevel.Default); + /// + /// compressor.InputBuffer = System.Text.ASCIIEncoding.ASCII.GetBytes(TextToCompress); + /// compressor.NextIn = 0; + /// compressor.AvailableBytesIn = compressor.InputBuffer.Length; + /// + /// compressor.OutputBuffer = CompressedBytes; + /// compressor.NextOut = 0; + /// compressor.AvailableBytesOut = CompressedBytes.Length; + /// + /// while (compressor.TotalBytesIn != TextToCompress.Length && compressor.TotalBytesOut < bufferSize) + /// { + /// compressor.Deflate(FlushType.None); + /// } + /// + /// while (true) + /// { + /// int rc= compressor.Deflate(FlushType.Finish); + /// if (rc == ZlibConstants.Z_STREAM_END) break; + /// } + /// + /// compressor.EndDeflate(); + /// + /// + /// + /// Z_OK if all goes well. You generally don't need to check the return code. + public int InitializeDeflate() + { + return _InternalInitializeDeflate(true); + } + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel. + /// + /// + /// The codec will use the maximum window bits (15) and the specified + /// CompressionLevel. It will emit a ZLIB stream as it compresses. + /// + /// The compression level for the codec. + /// Z_OK if all goes well. + public int InitializeDeflate(CompressionLevel level) + { + CompressLevel = level; + return _InternalInitializeDeflate(true); + } + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel, + /// and the explicit flag governing whether to emit an RFC1950 header byte pair. + /// + /// + /// The codec will use the maximum window bits (15) and the specified CompressionLevel. + /// If you want to generate a zlib stream, you should specify true for + /// wantRfc1950Header. 
In this case, the library will emit a ZLIB + /// header, as defined in RFC + /// 1950, in the compressed stream. + /// + /// The compression level for the codec. + /// whether to emit an initial RFC1950 byte pair in the compressed stream. + /// Z_OK if all goes well. + public int InitializeDeflate(CompressionLevel level, bool wantRfc1950Header) + { + CompressLevel = level; + return _InternalInitializeDeflate(wantRfc1950Header); + } + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel, + /// and the specified number of window bits. + /// + /// + /// The codec will use the specified number of window bits and the specified CompressionLevel. + /// + /// The compression level for the codec. + /// the number of window bits to use. If you don't know what this means, don't use this method. + /// Z_OK if all goes well. + public int InitializeDeflate(CompressionLevel level, int bits) + { + CompressLevel = level; + WindowBits = bits; + return _InternalInitializeDeflate(true); + } + + /// + /// Initialize the ZlibCodec for deflation operation, using the specified + /// CompressionLevel, the specified number of window bits, and the explicit flag + /// governing whether to emit an RFC1950 header byte pair. + /// + /// + /// The compression level for the codec. + /// whether to emit an initial RFC1950 byte pair in the compressed stream. + /// the number of window bits to use. If you don't know what this means, don't use this method. + /// Z_OK if all goes well. 
+ public int InitializeDeflate(CompressionLevel level, int bits, bool wantRfc1950Header) + { + CompressLevel = level; + WindowBits = bits; + return _InternalInitializeDeflate(wantRfc1950Header); + } + + private int _InternalInitializeDeflate(bool wantRfc1950Header) + { + if (istate != null) + { + throw new ZlibException("You may not call InitializeDeflate() after calling InitializeInflate()."); + } + dstate = new DeflateManager(); + dstate.WantRfc1950HeaderBytes = wantRfc1950Header; + + return dstate.Initialize(this, CompressLevel, WindowBits, Strategy); + } + + /// + /// Deflate one batch of data. + /// + /// + /// You must have set InputBuffer and OutputBuffer before calling this method. + /// + /// + /// + /// private void DeflateBuffer(CompressionLevel level) + /// { + /// int bufferSize = 1024; + /// byte[] buffer = new byte[bufferSize]; + /// ZlibCodec compressor = new ZlibCodec(); + /// + /// Console.WriteLine("\n============================================"); + /// Console.WriteLine("Size of Buffer to Deflate: {0} bytes.", UncompressedBytes.Length); + /// MemoryStream ms = new MemoryStream(); + /// + /// int rc = compressor.InitializeDeflate(level); + /// + /// compressor.InputBuffer = UncompressedBytes; + /// compressor.NextIn = 0; + /// compressor.AvailableBytesIn = UncompressedBytes.Length; + /// + /// compressor.OutputBuffer = buffer; + /// + /// // pass 1: deflate + /// do + /// { + /// compressor.NextOut = 0; + /// compressor.AvailableBytesOut = buffer.Length; + /// rc = compressor.Deflate(FlushType.None); + /// + /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) + /// throw new Exception("deflating: " + compressor.Message); + /// + /// ms.Write(compressor.OutputBuffer, 0, buffer.Length - compressor.AvailableBytesOut); + /// } + /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0); + /// + /// // pass 2: finish and flush + /// do + /// { + /// compressor.NextOut = 0; + /// compressor.AvailableBytesOut = 
buffer.Length; + /// rc = compressor.Deflate(FlushType.Finish); + /// + /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) + /// throw new Exception("deflating: " + compressor.Message); + /// + /// if (buffer.Length - compressor.AvailableBytesOut > 0) + /// ms.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut); + /// } + /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0); + /// + /// compressor.EndDeflate(); + /// + /// ms.Seek(0, SeekOrigin.Begin); + /// CompressedBytes = new byte[compressor.TotalBytesOut]; + /// ms.Read(CompressedBytes, 0, CompressedBytes.Length); + /// } + /// + /// + /// whether to flush all data as you deflate. Generally you will want to + /// use Z_NO_FLUSH here, in a series of calls to Deflate(), and then call EndDeflate() to + /// flush everything. + /// + /// Z_OK if all goes well. + public int Deflate(FlushType flush) + { + if (dstate == null) + { + throw new ZlibException("No Deflate State!"); + } + return dstate.Deflate(flush); + } + + /// + /// End a deflation session. + /// + /// + /// Call this after making a series of one or more calls to Deflate(). All buffers are flushed. + /// + /// Z_OK if all goes well. + public int EndDeflate() + { + if (dstate == null) + { + throw new ZlibException("No Deflate State!"); + } + + // TODO: dinoch Tue, 03 Nov 2009 15:39 (test this) + //int ret = dstate.End(); + dstate = null; + return ZlibConstants.Z_OK; //ret; + } + + /// + /// Reset a codec for another deflation session. + /// + /// + /// Call this to reset the deflation state. For example if a thread is deflating + /// non-consecutive blocks, you can call Reset() after the Deflate(Sync) of the first + /// block and before the next Deflate(None) of the second block. + /// + /// Z_OK if all goes well. 
+ public void ResetDeflate() + { + if (dstate == null) + { + throw new ZlibException("No Deflate State!"); + } + dstate.Reset(); + } + + /// + /// Set the CompressionStrategy and CompressionLevel for a deflation session. + /// + /// the level of compression to use. + /// the strategy to use for compression. + /// Z_OK if all goes well. + public int SetDeflateParams(CompressionLevel level, CompressionStrategy strategy) + { + if (dstate == null) + { + throw new ZlibException("No Deflate State!"); + } + return dstate.SetParams(level, strategy); + } + + /// + /// Set the dictionary to be used for either Inflation or Deflation. + /// + /// The dictionary bytes to use. + /// Z_OK if all goes well. + public int SetDictionary(byte[] dictionary) + { + if (istate != null) + { + return istate.SetDictionary(dictionary); + } + + if (dstate != null) + { + return dstate.SetDictionary(dictionary); + } + + throw new ZlibException("No Inflate or Deflate state!"); + } + + // Flush as much pending output as possible. All deflate() output goes + // through this function so some applications may wish to modify it + // to avoid allocating a large strm->next_out buffer and copying into it. + // (See also read_buf()). + internal void flush_pending() + { + int len = dstate.pendingCount; + + if (len > AvailableBytesOut) + { + len = AvailableBytesOut; + } + if (len == 0) + { + return; + } + + if (dstate.pending.Length <= dstate.nextPending || + OutputBuffer.Length <= NextOut || + dstate.pending.Length < (dstate.nextPending + len) || + OutputBuffer.Length < (NextOut + len)) + { + throw new ZlibException(String.Format("Invalid State. 
(pending.Length={0}, pendingCount={1})", + dstate.pending.Length, dstate.pendingCount)); + } + + Array.Copy(dstate.pending, dstate.nextPending, OutputBuffer, NextOut, len); + + NextOut += len; + dstate.nextPending += len; + TotalBytesOut += len; + AvailableBytesOut -= len; + dstate.pendingCount -= len; + if (dstate.pendingCount == 0) + { + dstate.nextPending = 0; + } + } + + // Read a new buffer from the current input stream, update the adler32 + // and total number of bytes read. All deflate() input goes through + // this function so some applications may wish to modify it to avoid + // allocating a large strm->next_in buffer and copying from it. + // (See also flush_pending()). + internal int read_buf(byte[] buf, int start, int size) + { + int len = AvailableBytesIn; + + if (len > size) + { + len = size; + } + if (len == 0) + { + return 0; + } + + AvailableBytesIn -= len; + + if (dstate.WantRfc1950HeaderBytes) + { + _Adler32 = Adler.Adler32(_Adler32, InputBuffer, NextIn, len); + } + Array.Copy(InputBuffer, NextIn, buf, start, len); + NextIn += len; + TotalBytesIn += len; + return len; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibConstants.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibConstants.cs new file mode 100644 index 0000000000..46dc62ee28 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibConstants.cs @@ -0,0 +1,125 @@ +// ZlibConstants.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. 
+// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2009-November-03 18:50:19> +// +// ------------------------------------------------------------------ +// +// This module defines constants used by the zlib class library. This +// code is derived from the jzlib implementation of zlib, but +// significantly modified. In keeping with the license for jzlib, the +// copyright to that code is included here. +// +// ------------------------------------------------------------------ +// +// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the distribution. +// +// 3. The names of the authors may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +// INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// +// This program is based on zlib-1.1.3; credit to authors +// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) +// and contributors of zlib. +// +// ----------------------------------------------------------------------- + + +namespace SharpCompress.Compressors.Deflate +{ + /// + /// A bunch of constants used in the Zlib interface. + /// + internal static class ZlibConstants + { + /// + /// The maximum number of window bits for the Deflate algorithm. + /// + public const int WindowBitsMax = 15; // 32K LZ77 window + + /// + /// The default number of window bits for the Deflate algorithm. + /// + public const int WindowBitsDefault = WindowBitsMax; + + /// + /// indicates everything is A-OK + /// + public const int Z_OK = 0; + + /// + /// Indicates that the last operation reached the end of the stream. + /// + public const int Z_STREAM_END = 1; + + /// + /// The operation ended in need of a dictionary. + /// + public const int Z_NEED_DICT = 2; + + /// + /// There was an error with the stream - not enough data, not open and readable, etc. + /// + public const int Z_STREAM_ERROR = -2; + + /// + /// There was an error with the data - not enough data, bad data, etc. + /// + public const int Z_DATA_ERROR = -3; + + /// + /// There was an error with the working buffer. 
+ /// + public const int Z_BUF_ERROR = -5; + + /// + /// The size of the working buffer used in the ZlibCodec class. Defaults to 8192 bytes. + /// +#if NETCF + public const int WorkingBufferSizeDefault = 8192; +#else + public const int WorkingBufferSizeDefault = 16384; +#endif + + /// + /// The minimum size of the working buffer used in the ZlibCodec class. Currently it is 128 bytes. + /// + public const int WorkingBufferSizeMin = 1024; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibStream.cs new file mode 100644 index 0000000000..c82040ef7b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate/ZlibStream.cs @@ -0,0 +1,344 @@ +// ZlibStream.cs +// ------------------------------------------------------------------ +// +// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. +// All rights reserved. +// +// This code module is part of DotNetZip, a zipfile class library. +// +// ------------------------------------------------------------------ +// +// This code is licensed under the Microsoft Public License. +// See the file License.txt for the license details. +// More info on: http://dotnetzip.codeplex.com +// +// ------------------------------------------------------------------ +// +// last saved (in emacs): +// Time-stamp: <2010-January-29 16:35:23> +// +// ------------------------------------------------------------------ +// +// This module defines the ZlibStream class, which is similar in idea to +// the System.IO.Compression.DeflateStream and +// System.IO.Compression.GZipStream classes in the .NET BCL. 
+// +// ------------------------------------------------------------------ + +using System; +using System.IO; +using System.Text; + +namespace SharpCompress.Compressors.Deflate +{ + public class ZlibStream : Stream + { + private readonly ZlibBaseStream _baseStream; + private bool _disposed; + + public ZlibStream(Stream stream, CompressionMode mode) + : this(stream, mode, CompressionLevel.Default, Encoding.UTF8) + { + } + + public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level) + : this(stream, mode, level, Encoding.UTF8) + { + } + + public ZlibStream(Stream stream, CompressionMode mode, CompressionLevel level, Encoding encoding) + { + _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, encoding); + } + + #region Zlib properties + + /// + /// This property sets the flush behavior on the stream. + /// Sorry, though, not sure exactly how to describe all the various settings. + /// + public virtual FlushType FlushMode + { + get => (_baseStream._flushMode); + set + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + _baseStream._flushMode = value; + } + } + + /// + /// The size of the working buffer for the compression codec. + /// + /// + /// + /// + /// The working buffer is used for all stream operations. The default size is + /// 1024 bytes. The minimum size is 128 bytes. You may get better performance + /// with a larger buffer. Then again, you might not. You would have to test + /// it. + /// + /// + /// + /// Set this before the first call to Read() or Write() on the + /// stream. If you try to set it afterwards, it will throw. 
+ /// + /// + public int BufferSize + { + get => _baseStream._bufferSize; + set + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + if (_baseStream._workingBuffer != null) + { + throw new ZlibException("The working buffer is already set."); + } + if (value < ZlibConstants.WorkingBufferSizeMin) + { + throw new ZlibException( + String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, + ZlibConstants.WorkingBufferSizeMin)); + } + _baseStream._bufferSize = value; + } + } + + /// Returns the total number of bytes input so far. + public virtual long TotalIn => _baseStream._z.TotalBytesIn; + + /// Returns the total number of bytes output so far. + public virtual long TotalOut => _baseStream._z.TotalBytesOut; + + #endregion + + #region System.IO.Stream methods + + /// + /// Indicates whether the stream can be read. + /// + /// + /// The return value depends on whether the captive stream supports reading. + /// + public override bool CanRead + { + get + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + return _baseStream._stream.CanRead; + } + } + + /// + /// Indicates whether the stream supports Seek operations. + /// + /// + /// Always returns false. + /// + public override bool CanSeek => false; + + /// + /// Indicates whether the stream can be written. + /// + /// + /// The return value depends on whether the captive stream supports writing. + /// + public override bool CanWrite + { + get + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + return _baseStream._stream.CanWrite; + } + } + + /// + /// Reading this property always throws a . + /// + public override long Length => throw new NotSupportedException(); + + /// + /// The position of the stream pointer. + /// + /// + /// + /// Setting this property always throws a . Reading will return the total bytes + /// written out, if used in writing, or the total bytes read in, if used in + /// reading. 
The count may refer to compressed bytes or uncompressed bytes, + /// depending on how you've used the stream. + /// + public override long Position + { + get + { + if (_baseStream._streamMode == ZlibBaseStream.StreamMode.Writer) + { + return _baseStream._z.TotalBytesOut; + } + if (_baseStream._streamMode == ZlibBaseStream.StreamMode.Reader) + { + return _baseStream._z.TotalBytesIn; + } + return 0; + } + + set => throw new NotSupportedException(); + } + + /// + /// Dispose the stream. + /// + /// + /// This may or may not result in a Close() call on the captive stream. + /// + protected override void Dispose(bool disposing) + { + try + { + if (!_disposed) + { + if (disposing) + { + _baseStream?.Dispose(); + } + _disposed = true; + } + } + finally + { + base.Dispose(disposing); + } + } + + /// + /// Flush the stream. + /// + public override void Flush() + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + _baseStream.Flush(); + } + + /// + /// Read data from the stream. + /// + /// + /// + /// + /// + /// If you wish to use the ZlibStream to compress data while reading, + /// you can create a ZlibStream with CompressionMode.Compress, + /// providing an uncompressed data stream. Then call Read() on that + /// ZlibStream, and the data read will be compressed. If you wish to + /// use the ZlibStream to decompress data while reading, you can create + /// a ZlibStream with CompressionMode.Decompress, providing a + /// readable compressed data stream. Then call Read() on that + /// ZlibStream, and the data will be decompressed as it is read. + /// + /// + /// + /// A ZlibStream can be used for Read() or Write(), but + /// not both. + /// + /// + /// + /// The buffer into which the read data should be placed. + /// the offset within that data array to put the first byte read. + /// the number of bytes to read. 
+ public override int Read(byte[] buffer, int offset, int count) + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + return _baseStream.Read(buffer, offset, count); + } + + public override int ReadByte() + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + return _baseStream.ReadByte(); + } + + /// + /// Calling this method always throws a . + /// + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + /// + /// Calling this method always throws a . + /// + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + /// + /// Write data to the stream. + /// + /// + /// + /// + /// + /// If you wish to use the ZlibStream to compress data while writing, + /// you can create a ZlibStream with CompressionMode.Compress, + /// and a writable output stream. Then call Write() on that + /// ZlibStream, providing uncompressed data as input. The data sent to + /// the output stream will be the compressed form of the data written. If you + /// wish to use the ZlibStream to decompress data while writing, you + /// can create a ZlibStream with CompressionMode.Decompress, and a + /// writable output stream. Then call Write() on that stream, + /// providing previously compressed data. The data sent to the output stream + /// will be the decompressed form of the data written. + /// + /// + /// + /// A ZlibStream can be used for Read() or Write(), but not both. + /// + /// + /// The buffer holding data to write to the stream. + /// the offset within that data array to find the first byte to write. + /// the number of bytes to write. 
+ public override void Write(byte[] buffer, int offset, int count) + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + _baseStream.Write(buffer, offset, count); + } + + public override void WriteByte(byte value) + { + if (_disposed) + { + throw new ObjectDisposedException("ZlibStream"); + } + _baseStream.WriteByte(value); + } + + #endregion System.IO.Stream methods + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/BlockType.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/BlockType.cs new file mode 100644 index 0000000000..c89380798a --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/BlockType.cs @@ -0,0 +1,13 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +namespace SharpCompress.Compressors.Deflate64 +{ + internal enum BlockType + { + Uncompressed = 0, + Static = 1, + Dynamic = 2 + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs new file mode 100644 index 0000000000..be7300c200 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs @@ -0,0 +1,255 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
using SharpCompress.Common.Zip;
using SharpCompress.Compressors.Deflate;
using System;
using System.Diagnostics;
using System.IO;
using System.Runtime.CompilerServices;

namespace SharpCompress.Compressors.Deflate64
{
    /// <summary>
    /// Read-only stream that decompresses Deflate64 data pulled from an
    /// underlying stream. Compression is not implemented; constructing with
    /// <see cref="CompressionMode.Compress"/> throws.
    /// </summary>
    public sealed class Deflate64Stream : Stream
    {
        private const int DEFAULT_BUFFER_SIZE = 8192;

        private Stream _stream;
        private CompressionMode _mode;
        private InflaterManaged _inflater;
        private byte[] _buffer;

        /// <summary>
        /// Wraps <paramref name="stream"/> for decompression.
        /// </summary>
        /// <param name="stream">Readable stream supplying Deflate64-compressed bytes.</param>
        /// <param name="mode">Must be <see cref="CompressionMode.Decompress"/>.</param>
        /// <exception cref="ArgumentNullException">stream is null.</exception>
        /// <exception cref="NotImplementedException">mode is not Decompress.</exception>
        /// <exception cref="ArgumentException">stream is not readable.</exception>
        public Deflate64Stream(Stream stream, CompressionMode mode)
        {
            if (stream == null)
            {
                throw new ArgumentNullException(nameof(stream));
            }
            if (mode != CompressionMode.Decompress)
            {
                throw new NotImplementedException("Deflate64: this implementation only supports decompression");
            }
            if (!stream.CanRead)
            {
                throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
            }

            InitializeInflater(stream, ZipCompressionMethod.Deflate64);
        }

        /// <summary>
        /// Sets up this stream for inflation/decompression.
        /// </summary>
        private void InitializeInflater(Stream stream, ZipCompressionMethod method = ZipCompressionMethod.Deflate)
        {
            Debug.Assert(stream != null);
            Debug.Assert(method == ZipCompressionMethod.Deflate || method == ZipCompressionMethod.Deflate64);
            if (!stream.CanRead)
            {
                throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
            }

            _inflater = new InflaterManaged(method == ZipCompressionMethod.Deflate64);
            _stream = stream;
            _mode = CompressionMode.Decompress;
            _buffer = new byte[DEFAULT_BUFFER_SIZE];
        }

        // Readable only while not disposed and in decompress mode.
        public override bool CanRead => _stream != null && _mode == CompressionMode.Decompress && _stream.CanRead;

        // Writing is never supported (mode is always Decompress), but mirror the
        // upstream check for symmetry.
        public override bool CanWrite => _stream != null && _mode == CompressionMode.Compress && _stream.CanWrite;

        public override bool CanSeek => false;

        public override long Length => throw new NotSupportedException("Deflate64: not supported");

        public override long Position
        {
            get => throw new NotSupportedException("Deflate64: not supported");
            set => throw new NotSupportedException("Deflate64: not supported");
        }

        public override void Flush()
        {
            EnsureNotDisposed();
        }

        public override long Seek(long offset, SeekOrigin origin) =>
            throw new NotSupportedException("Deflate64: not supported");

        public override void SetLength(long value) =>
            throw new NotSupportedException("Deflate64: not supported");

        /// <summary>
        /// Decompresses up to <paramref name="count"/> bytes into <paramref name="array"/>,
        /// pulling compressed input from the underlying stream as needed.
        /// </summary>
        /// <returns>Number of decompressed bytes produced; 0 at end of data.</returns>
        public override int Read(byte[] array, int offset, int count)
        {
            EnsureDecompressionMode();
            ValidateParameters(array, offset, count);
            EnsureNotDisposed();

            int cursor = offset;
            int remaining = count;

            while (true)
            {
                int produced = _inflater.Inflate(array, cursor, remaining);
                cursor += produced;
                remaining -= produced;

                if (remaining == 0)
                {
                    break;
                }

                if (_inflater.Finished())
                {
                    // Once the inflater is done, its output window must be empty.
                    Debug.Assert(_inflater.AvailableOutput == 0, "We should have copied all stuff out!");
                    break;
                }

                int fetched = _stream.Read(_buffer, 0, _buffer.Length);
                if (fetched <= 0)
                {
                    break;
                }
                if (fetched > _buffer.Length)
                {
                    // A malicious or broken stream claimed to fill more than the
                    // buffer we handed it.
                    throw new InvalidDataException("Deflate64: invalid data");
                }

                _inflater.SetInput(_buffer, 0, fetched);
            }

            return count - remaining;
        }

        // Standard argument validation for Read; kept out of the hot loop.
        private void ValidateParameters(byte[] array, int offset, int count)
        {
            if (array == null)
            {
                throw new ArgumentNullException(nameof(array));
            }
            if (offset < 0)
            {
                throw new ArgumentOutOfRangeException(nameof(offset));
            }
            if (count < 0)
            {
                throw new ArgumentOutOfRangeException(nameof(count));
            }
            if (array.Length - offset < count)
            {
                throw new ArgumentException("Deflate64: invalid offset/count combination");
            }
        }

        private void EnsureNotDisposed()
        {
            if (_stream == null)
            {
                ThrowStreamClosedException();
            }
        }

        [MethodImpl(MethodImplOptions.NoInlining)]
        private static void ThrowStreamClosedException() =>
            throw new ObjectDisposedException(null, "Deflate64: stream has been disposed");

        private void EnsureDecompressionMode()
        {
            if (_mode != CompressionMode.Decompress)
            {
                ThrowCannotReadFromDeflateManagedStreamException();
            }
        }

        [MethodImpl(MethodImplOptions.NoInlining)]
        private static void ThrowCannotReadFromDeflateManagedStreamException() =>
            throw new InvalidOperationException("Deflate64: cannot read from this stream");

        private void EnsureCompressionMode()
        {
            if (_mode != CompressionMode.Compress)
            {
                ThrowCannotWriteToDeflateManagedStreamException();
            }
        }

        [MethodImpl(MethodImplOptions.NoInlining)]
        private static void ThrowCannotWriteToDeflateManagedStreamException() =>
            throw new InvalidOperationException("Deflate64: cannot write to this stream");

        /// <summary>Writing is never supported; always throws.</summary>
        public override void Write(byte[] array, int offset, int count)
        {
            ThrowCannotWriteToDeflateManagedStreamException();
        }

        // Called from Dispose: flush any pending state before teardown.
        private void PurgeBuffers(bool disposing)
        {
            if (!disposing || _stream == null)
            {
                return;
            }
            Flush();
        }

        protected override void Dispose(bool disposing)
        {
            try
            {
                PurgeBuffers(disposing);
            }
            finally
            {
                // Close the underlying stream even if PurgeBuffers threw.
                // Stream.Close() may itself throw, so nested finally blocks
                // guarantee the internal resources are still released.
                try
                {
                    if (disposing)
                    {
                        _stream?.Dispose();
                    }
                }
                finally
                {
                    _stream = null;

                    try
                    {
                        _inflater?.Dispose();
                    }
                    finally
                    {
                        _inflater = null;
                        base.Dispose(disposing);
                    }
                }
            }
        }
    }
}

// ---- DeflateInput.cs ----
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System.Diagnostics;

namespace SharpCompress.Compressors.Deflate64
{
    // Sliding view over the caller-supplied input buffer: tracks how much of it
    // the encoder/decoder has consumed, and supports save/restore of that state.
    internal sealed class DeflateInput
    {
        internal byte[] Buffer { get; set; }
        internal int Count { get; set; }
        internal int StartIndex { get; set; }

        // Advance the window by n consumed bytes.
        internal void ConsumeBytes(int n)
        {
            Debug.Assert(n <= Count, "Should use more bytes than what we have in the buffer");
            StartIndex += n;
            Count -= n;
            Debug.Assert(StartIndex + Count <= Buffer.Length, "Input buffer is in invalid state!");
        }

        internal InputState DumpState() => new InputState(Count, StartIndex);

        internal void RestoreState(InputState state)
        {
            Count = state._count;
            StartIndex = state._startIndex;
        }

        // Snapshot of the consumable window (would be 'readonly struct' on newer C#).
        internal struct InputState
        {
            internal readonly int _count;
            internal readonly int _startIndex;

            internal InputState(int count, int startIndex)
            {
                _count = count;
                _startIndex = startIndex;
            }
        }
    }
}

// (next file: FastEncoderStatus.cs)
// ---- FastEncoderStatus.cs ----
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System.Diagnostics;

namespace SharpCompress.Compressors.Deflate64
{
    // Static tables used by the fast DEFLATE encoder. DO NOT MODIFY the data.
    //
    // Unlike other encoders, a single table element here carries the complete
    // Huffman code for each match length (code bits plus extra bits), so one
    // lookup replaces several (length slot, tree length, tree code, extra bits,
    // bit mask). Distance codes cannot be fully tabulated (8192 distances), so
    // their table stores code, length and extra-bit count per slot instead.
    //
    // Encoding information for literal and length values: the least significant
    // 5 bits of each entry are the code length; the remaining bits are the code.
    internal static class FastEncoderStatics
    {
        internal static readonly byte[] FAST_ENCODER_TREE_STRUCTURE_DATA =
        {
            0xec,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
            0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
            0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
            0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
            0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
            0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
            0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
            0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
            0x1f,0x3f
        };

        internal static readonly byte[] B_FINAL_FAST_ENCODER_TREE_STRUCTURE_DATA =
        {
            0xed,0xbd,0x07,0x60,0x1c,0x49,0x96,0x25,0x26,0x2f,0x6d,0xca,
            0x7b,0x7f,0x4a,0xf5,0x4a,0xd7,0xe0,0x74,0xa1,0x08,0x80,0x60,
            0x13,0x24,0xd8,0x90,0x40,0x10,0xec,0xc1,0x88,0xcd,0xe6,0x92,
            0xec,0x1d,0x69,0x47,0x23,0x29,0xab,0x2a,0x81,0xca,0x65,0x56,
            0x65,0x5d,0x66,0x16,0x40,0xcc,0xed,0x9d,0xbc,0xf7,0xde,0x7b,
            0xef,0xbd,0xf7,0xde,0x7b,0xef,0xbd,0xf7,0xba,0x3b,0x9d,0x4e,
            0x27,0xf7,0xdf,0xff,0x3f,0x5c,0x66,0x64,0x01,0x6c,0xf6,0xce,
            0x4a,0xda,0xc9,0x9e,0x21,0x80,0xaa,0xc8,0x1f,0x3f,0x7e,0x7c,
            0x1f,0x3f
        };

        internal static readonly uint[] FAST_ENCODER_LITERAL_CODE_INFO =
        {
            0x0000d7ee,0x0004d7ee,0x0002d7ee,0x0006d7ee,0x0001d7ee,0x0005d7ee,0x0003d7ee,
            0x0007d7ee,0x000037ee,0x0000c7ec,0x00000126,0x000437ee,0x000237ee,0x000637ee,
            0x000137ee,0x000537ee,0x000337ee,0x000737ee,0x0000b7ee,0x0004b7ee,0x0002b7ee,
            0x0006b7ee,0x0001b7ee,0x0005b7ee,0x0003b7ee,0x0007b7ee,0x000077ee,0x000477ee,
            0x000277ee,0x000677ee,0x000017ed,0x000177ee,0x00000526,0x000577ee,0x000023ea,
            0x0001c7ec,0x000377ee,0x000777ee,0x000217ed,0x000063ea,0x00000b68,0x00000ee9,
            0x00005beb,0x000013ea,0x00000467,0x00001b68,0x00000c67,0x00002ee9,0x00000768,
            0x00001768,0x00000f68,0x00001ee9,0x00001f68,0x00003ee9,0x000053ea,0x000001e9,
            0x000000e8,0x000021e9,0x000011e9,0x000010e8,0x000031e9,0x000033ea,0x000008e8,
            0x0000f7ee,0x0004f7ee,0x000018e8,0x000009e9,0x000004e8,0x000029e9,0x000014e8,
            0x000019e9,0x000073ea,0x0000dbeb,0x00000ce8,0x00003beb,0x0002f7ee,0x000039e9,
            0x00000bea,0x000005e9,0x00004bea,0x000025e9,0x000027ec,0x000015e9,0x000035e9,
            0x00000de9,0x00002bea,0x000127ec,0x0000bbeb,0x0006f7ee,0x0001f7ee,0x0000a7ec,
            0x00007beb,0x0005f7ee,0x0000fbeb,0x0003f7ee,0x0007f7ee,0x00000fee,0x00000326,
            0x00000267,0x00000a67,0x00000667,0x00000726,0x00001ce8,0x000002e8,0x00000e67,
            0x000000a6,0x0001a7ec,0x00002de9,0x000004a6,0x00000167,0x00000967,0x000002a6,
            0x00000567,0x000117ed,0x000006a6,0x000001a6,0x000005a6,0x00000d67,0x000012e8,
            0x00000ae8,0x00001de9,0x00001ae8,0x000007eb,0x000317ed,0x000067ec,0x000097ed,
            0x000297ed,0x00040fee,0x00020fee,0x00060fee,0x00010fee,0x00050fee,0x00030fee,
            0x00070fee,0x00008fee,0x00048fee,0x00028fee,0x00068fee,0x00018fee,0x00058fee,
            0x00038fee,0x00078fee,0x00004fee,0x00044fee,0x00024fee,0x00064fee,0x00014fee,
            0x00054fee,0x00034fee,0x00074fee,0x0000cfee,0x0004cfee,0x0002cfee,0x0006cfee,
            0x0001cfee,0x0005cfee,0x0003cfee,0x0007cfee,0x00002fee,0x00042fee,0x00022fee,
            0x00062fee,0x00012fee,0x00052fee,0x00032fee,0x00072fee,0x0000afee,0x0004afee,
            0x0002afee,0x0006afee,0x0001afee,0x0005afee,0x0003afee,0x0007afee,0x00006fee,
            0x00046fee,0x00026fee,0x00066fee,0x00016fee,0x00056fee,0x00036fee,0x00076fee,
            0x0000efee,0x0004efee,0x0002efee,0x0006efee,0x0001efee,0x0005efee,0x0003efee,
            0x0007efee,0x00001fee,0x00041fee,0x00021fee,0x00061fee,0x00011fee,0x00051fee,
            0x00031fee,0x00071fee,0x00009fee,0x00049fee,0x00029fee,0x00069fee,0x00019fee,
            0x00059fee,0x00039fee,0x00079fee,0x00005fee,0x00045fee,0x00025fee,0x00065fee,
            0x00015fee,0x00055fee,0x00035fee,0x00075fee,0x0000dfee,0x0004dfee,0x0002dfee,
            0x0006dfee,0x0001dfee,0x0005dfee,0x0003dfee,0x0007dfee,0x00003fee,0x00043fee,
            0x00023fee,0x00063fee,0x00013fee,0x00053fee,0x00033fee,0x00073fee,0x0000bfee,
            0x0004bfee,0x0002bfee,0x0006bfee,0x0001bfee,0x0005bfee,0x0003bfee,0x0007bfee,
            0x00007fee,0x00047fee,0x00027fee,0x00067fee,0x00017fee,0x000197ed,0x000397ed,
            0x000057ed,0x00057fee,0x000257ed,0x00037fee,0x000157ed,0x00077fee,0x000357ed,
            0x0000ffee,0x0004ffee,0x0002ffee,0x0006ffee,0x0001ffee,0x00000084,0x00000003,
            0x00000184,0x00000044,0x00000144,0x000000c5,0x000002c5,0x000001c5,0x000003c6,
            0x000007c6,0x00000026,0x00000426,0x000003a7,0x00000ba7,0x000007a7,0x00000fa7,
            0x00000227,0x00000627,0x00000a27,0x00000e27,0x00000068,0x00000868,0x00001068,
            0x00001868,0x00000369,0x00001369,0x00002369,0x00003369,0x000006ea,0x000026ea,
            0x000046ea,0x000066ea,0x000016eb,0x000036eb,0x000056eb,0x000076eb,0x000096eb,
            0x0000b6eb,0x0000d6eb,0x0000f6eb,0x00003dec,0x00007dec,0x0000bdec,0x0000fdec,
            0x00013dec,0x00017dec,0x0001bdec,0x0001fdec,0x00006bed,0x0000ebed,0x00016bed,
            0x0001ebed,0x00026bed,0x0002ebed,0x00036bed,0x0003ebed,0x000003ec,0x000043ec,
            0x000083ec,0x0000c3ec,0x000103ec,0x000143ec,0x000183ec,0x0001c3ec,0x00001bee,
            0x00009bee,0x00011bee,0x00019bee,0x00021bee,0x00029bee,0x00031bee,0x00039bee,
            0x00041bee,0x00049bee,0x00051bee,0x00059bee,0x00061bee,0x00069bee,0x00071bee,
            0x00079bee,0x000167f0,0x000367f0,0x000567f0,0x000767f0,0x000967f0,0x000b67f0,
            0x000d67f0,0x000f67f0,0x001167f0,0x001367f0,0x001567f0,0x001767f0,0x001967f0,
            0x001b67f0,0x001d67f0,0x001f67f0,0x000087ef,0x000187ef,0x000287ef,0x000387ef,
            0x000487ef,0x000587ef,0x000687ef,0x000787ef,0x000887ef,0x000987ef,0x000a87ef,
            0x000b87ef,0x000c87ef,0x000d87ef,0x000e87ef,0x000f87ef,0x0000e7f0,0x0002e7f0,
            0x0004e7f0,0x0006e7f0,0x0008e7f0,0x000ae7f0,0x000ce7f0,0x000ee7f0,0x0010e7f0,
            0x0012e7f0,0x0014e7f0,0x0016e7f0,0x0018e7f0,0x001ae7f0,0x001ce7f0,0x001ee7f0,
            0x0005fff3,0x000dfff3,0x0015fff3,0x001dfff3,0x0025fff3,0x002dfff3,0x0035fff3,
            0x003dfff3,0x0045fff3,0x004dfff3,0x0055fff3,0x005dfff3,0x0065fff3,0x006dfff3,
            0x0075fff3,0x007dfff3,0x0085fff3,0x008dfff3,0x0095fff3,0x009dfff3,0x00a5fff3,
            0x00adfff3,0x00b5fff3,0x00bdfff3,0x00c5fff3,0x00cdfff3,0x00d5fff3,0x00ddfff3,
            0x00e5fff3,0x00edfff3,0x00f5fff3,0x00fdfff3,0x0003fff3,0x000bfff3,0x0013fff3,
            0x001bfff3,0x0023fff3,0x002bfff3,0x0033fff3,0x003bfff3,0x0043fff3,0x004bfff3,
            0x0053fff3,0x005bfff3,0x0063fff3,0x006bfff3,0x0073fff3,0x007bfff3,0x0083fff3,
            0x008bfff3,0x0093fff3,0x009bfff3,0x00a3fff3,0x00abfff3,0x00b3fff3,0x00bbfff3,
            0x00c3fff3,0x00cbfff3,0x00d3fff3,0x00dbfff3,0x00e3fff3,0x00ebfff3,0x00f3fff3,
            0x00fbfff3,0x0007fff3,0x000ffff3,0x0017fff3,0x001ffff3,0x0027fff3,0x002ffff3,
            0x0037fff3,0x003ffff3,0x0047fff3,0x004ffff3,0x0057fff3,0x005ffff3,0x0067fff3,
            0x006ffff3,0x0077fff3,0x007ffff3,0x0087fff3,0x008ffff3,0x0097fff3,0x009ffff3,
            0x00a7fff3,0x00affff3,0x00b7fff3,0x00bffff3,0x00c7fff3,0x00cffff3,0x00d7fff3,
            0x00dffff3,0x00e7fff3,0x00effff3,0x00f7fff3,0x00fffff3,0x0001e7f1,0x0003e7f1,
            0x0005e7f1,0x0007e7f1,0x0009e7f1,0x000be7f1,0x000de7f1,0x000fe7f1,0x0011e7f1,
            0x0013e7f1,0x0015e7f1,0x0017e7f1,0x0019e7f1,0x001be7f1,0x001de7f1,0x001fe7f1,
            0x0021e7f1,0x0023e7f1,0x0025e7f1,0x0027e7f1,0x0029e7f1,0x002be7f1,0x002de7f1,
            0x002fe7f1,0x0031e7f1,0x0033e7f1,0x0035e7f1,0x0037e7f1,0x0039e7f1,0x003be7f1,
            0x003de7f1,0x000047eb
        };

        internal static readonly uint[] FAST_ENCODER_DISTANCE_CODE_INFO =
        {
            0x00000f06,0x0001ff0a,0x0003ff0b,0x0007ff0b,0x0000ff19,0x00003f18,0x0000bf28,
            0x00007f28,0x00001f37,0x00005f37,0x00000d45,0x00002f46,0x00000054,0x00001d55,
            0x00000864,0x00000365,0x00000474,0x00001375,0x00000c84,0x00000284,0x00000a94,
            0x00000694,0x00000ea4,0x000001a4,0x000009b4,0x00000bb5,0x000005c4,0x00001bc5,
            0x000007d5,0x000017d5,0x00000000,0x00000100
        };

        internal static readonly uint[] BIT_MASK = { 0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767 };
        internal static readonly byte[] EXTRA_LENGTH_BITS = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 };
        internal static readonly byte[] EXTRA_DISTANCE_BITS = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0 };

        internal const int NUM_CHARS = 256;
        internal const int NUM_LENGTH_BASE_CODES = 29;
        internal const int NUM_DIST_BASE_CODES = 30;

        internal const uint FAST_ENCODER_POST_TREE_BIT_BUF = 0x0022;
        internal const int FAST_ENCODER_POST_TREE_BIT_COUNT = 9;

        internal const uint NO_COMPRESSION_HEADER = 0x0;
        internal const int NO_COMPRESSION_HEADER_BIT_COUNT = 3;
        internal const uint B_FINAL_NO_COMPRESSION_HEADER = 0x1;
        internal const int B_FINAL_NO_COMPRESSION_HEADER_BIT_COUNT = 3;
        internal const int MAX_CODE_LEN = 16;

        private static readonly byte[] S_DIST_LOOKUP = CreateDistanceLookup();

        // Builds the table mapping a match distance (0..32K, scaled above 256)
        // to its distance slot (0..29), per the RFC 1951 distance code table.
        private static byte[] CreateDistanceLookup()
        {
            var table = new byte[512];

            int dist = 0;
            int code = 0;

            // Slots 0..15 cover distances 1..256; one table entry per distance.
            while (code < 16)
            {
                int span = 1 << EXTRA_DISTANCE_BITS[code];
                for (int i = 0; i < span; i++)
                {
                    table[dist++] = (byte)code;
                }
                code++;
            }

            dist >>= 7; // from now on, all distances are divided by 128

            // Slots 16..29 cover the scaled-down upper distance range.
            while (code < NUM_DIST_BASE_CODES)
            {
                int span = 1 << (EXTRA_DISTANCE_BITS[code] - 7);
                for (int i = 0; i < span; i++)
                {
                    table[256 + dist++] = (byte)code;
                }
                code++;
            }

            return table;
        }

        // Return the position slot (0...29) of a match offset (0...32767).
        internal static int GetSlot(int pos) =>
            S_DIST_LOOKUP[pos < 256 ? pos : 256 + (pos >> 7)];

        // Reverse the low 'length' bits of 'code'.
        public static uint BitReverse(uint code, int length)
        {
            Debug.Assert(length > 0 && length <= 16, "Invalid len");

            uint reversed = 0;
            do
            {
                reversed |= (code & 1);
                reversed <<= 1;
                code >>= 1;
            } while (--length > 0);

            return reversed >> 1;
        }
    }
}

// ---- HuffmanTree.cs ----
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System.Diagnostics;
using System.IO;

namespace SharpCompress.Compressors.Deflate64
{
    // Not strictly a Huffman tree: a direct lookup table (fast path for short,
    // frequent codes) combined with a left/right overflow tree for codes longer
    // than the table width. Building a full table for every code would cost more
    // than decoding, since DEFLATE regenerates tables frequently.
    //
    // Background: Jean-loup Gailly and Mark Adler's algorithm.txt (zlib), and
    // Hirschberg & Lelewer, "Efficient decoding of prefix codes,"
    // Comm. ACM 33(4), April 1990, pp. 449-459.
    internal sealed class HuffmanTree
    {
        internal const int MAX_LITERAL_TREE_ELEMENTS = 288;
        internal const int MAX_DIST_TREE_ELEMENTS = 32;
        internal const int END_OF_BLOCK_CODE = 256;
        internal const int NUMBER_OF_CODE_LENGTH_TREE_ELEMENTS = 19;

        private readonly int _tableBits;
        private readonly short[] _table;       // fast lookup, indexed by low _tableBits of the bit buffer
        private readonly short[] _left;        // overflow tree, 0-bit branch
        private readonly short[] _right;       // overflow tree, 1-bit branch
        private readonly byte[] _codeLengthArray;
#if DEBUG
        private uint[] _codeArrayDebug;
#endif

        private readonly int _tableMask;

        // Huffman trees for static (fixed) blocks, per RFC 1951 section 3.2.6.
        public static HuffmanTree StaticLiteralLengthTree { get; } = new HuffmanTree(GetStaticLiteralTreeLength());

        public static HuffmanTree StaticDistanceTree { get; } = new HuffmanTree(GetStaticDistanceTreeLength());

        /// <summary>
        /// Builds the lookup table and overflow tree from per-symbol code lengths.
        /// </summary>
        /// <param name="codeLengths">One code length per symbol; must be one of
        /// the three recognized alphabet sizes.</param>
        public HuffmanTree(byte[] codeLengths)
        {
            Debug.Assert(
                codeLengths.Length == MAX_LITERAL_TREE_ELEMENTS ||
                codeLengths.Length == MAX_DIST_TREE_ELEMENTS ||
                codeLengths.Length == NUMBER_OF_CODE_LENGTH_TREE_ELEMENTS,
                "we only expect three kinds of Length here");
            _codeLengthArray = codeLengths;

            // 9 table bits for the literal/length tree; 7 for the distance and
            // code-length trees.
            _tableBits = _codeLengthArray.Length == MAX_LITERAL_TREE_ELEMENTS ? 9 : 7;
            _tableMask = (1 << _tableBits) - 1;

            _table = new short[1 << _tableBits];

            // NOTE(review): upstream comment says proof is pending that 2*N
            // nodes always suffice for the overflow trees; kept as-is.
            _left = new short[2 * _codeLengthArray.Length];
            _right = new short[2 * _codeLengthArray.Length];

            CreateTable();
        }

        // Code lengths for the static literal/length tree (RFC 1951, 3.2.6).
        private static byte[] GetStaticLiteralTreeLength()
        {
            var lengths = new byte[MAX_LITERAL_TREE_ELEMENTS];
            for (int i = 0; i < lengths.Length; i++)
            {
                lengths[i] = i <= 143 ? (byte)8
                           : i <= 255 ? (byte)9
                           : i <= 279 ? (byte)7
                           : (byte)8;
            }
            return lengths;
        }

        // All 32 static distance codes are 5 bits long.
        private static byte[] GetStaticDistanceTreeLength()
        {
            var lengths = new byte[MAX_DIST_TREE_ELEMENTS];
            for (int i = 0; i < lengths.Length; i++)
            {
                lengths[i] = 5;
            }
            return lengths;
        }

        // Derives the canonical Huffman code for every symbol from the code
        // lengths, as described in RFC 1951 section 3.2.2; codes are stored
        // bit-reversed for LSB-first reading.
        private uint[] CalculateHuffmanCode()
        {
            var bitLengthCount = new uint[17];
            foreach (int len in _codeLengthArray)
            {
                bitLengthCount[len]++;
            }
            bitLengthCount[0] = 0; // length 0 means "symbol unused"

            var nextCode = new uint[17];
            uint running = 0;
            for (int bits = 1; bits <= 16; bits++)
            {
                running = (running + bitLengthCount[bits - 1]) << 1;
                nextCode[bits] = running;
            }

            var code = new uint[MAX_LITERAL_TREE_ELEMENTS];
            for (int i = 0; i < _codeLengthArray.Length; i++)
            {
                int len = _codeLengthArray[i];
                if (len > 0)
                {
                    code[i] = FastEncoderStatics.BitReverse(nextCode[len], len);
                    nextCode[len]++;
                }
            }
            return code;
        }

        private void CreateTable()
        {
            uint[] codeArray = CalculateHuffmanCode();
#if DEBUG
            _codeArrayDebug = codeArray;
#endif

            short avail = (short)_codeLengthArray.Length;

            for (int ch = 0; ch < _codeLengthArray.Length; ch++)
            {
                int len = _codeLengthArray[ch];
                if (len <= 0)
                {
                    continue; // unused symbol
                }

                int start = (int)codeArray[ch]; // bit-reversed starting code

                if (len <= _tableBits)
                {
                    // Short code: duplicate the symbol across every table slot
                    // whose low 'len' bits match its code. A 4-bit symbol in a
                    // 9-bit table appears 32 times; a 9-bit symbol once.
                    //
                    // The last store lands at start + table_size - increment,
                    // so we must have start < increment or we'd write past the
                    // table (and the data would be invalid anyway).
                    int increment = 1 << len;
                    if (start >= increment)
                    {
                        throw new InvalidDataException("Deflate64: invalid Huffman data");
                    }

                    int locs = 1 << (_tableBits - len);
                    for (int j = 0; j < locs; j++)
                    {
                        _table[start] = (short)ch;
                        start += increment;
                    }
                }
                else
                {
                    // Long code: the table entry points (as a negative index)
                    // into the left/right binary tree that resolves the bits
                    // beyond _tableBits. This keeps the table small when only
                    // a few symbols have long codes.
                    int overflowBits = len - _tableBits;
                    int codeBitMask = 1 << _tableBits; // selects the current overflow bit

                    int index = start & ((1 << _tableBits) - 1);
                    short[] array = _table;

                    do
                    {
                        short value = array[index];

                        if (value == 0)
                        {
                            // Unused slot: allocate the next free tree node.
                            array[index] = (short)-avail;
                            value = (short)-avail;
                            avail++;
                        }

                        if (value > 0)
                        {
                            // A leaf already occupies this path: corrupt data
                            // (guards against an IndexOutOfRangeException below).
                            throw new InvalidDataException("Deflate64: invalid Huffman data");
                        }

                        Debug.Assert(value < 0, "CreateTable: Only negative numbers are used for tree pointers!");

                        // Follow the 0-branch (left) or 1-branch (right).
                        array = (start & codeBitMask) == 0 ? _left : _right;
                        index = -value;

                        codeBitMask <<= 1;
                        overflowBits--;
                    } while (overflowBits != 0);

                    array[index] = (short)ch;
                }
            }
        }

        /// <summary>
        /// Decodes the next symbol from <paramref name="input"/>.
        /// Returns -1 when the input does not yet hold a complete code.
        /// </summary>
        public int GetNextSymbol(InputBuffer input)
        {
            // Pull up to 16 bits; if nothing is available, we're out of input.
            uint bitBuffer = input.TryLoad16Bits();
            if (input.AvailableBits == 0)
            {
                return -1;
            }

            int symbol = _table[bitBuffer & _tableMask];
            if (symbol < 0)
            {
                // Negative entries index into the overflow tree; walk it one
                // bit at a time starting just past the table bits.
                uint mask = (uint)1 << _tableBits;
                do
                {
                    symbol = -symbol;
                    symbol = (bitBuffer & mask) == 0 ? _left[symbol] : _right[symbol];
                    mask <<= 1;
                } while (symbol < 0);
            }

            int codeLength = _codeLengthArray[symbol];

            // Huffman code lengths must be at least 1 bit long.
            if (codeLength <= 0)
            {
                throw new InvalidDataException("Deflate64: invalid Huffman data");
            }

            // If we matched on fewer bits than the full code (partial read),
            // we may have hit the wrong entry; since we already tried to load
            // 16 bits and the max code length is 15, this means end of input.
            if (codeLength > input.AvailableBits)
            {
                return -1;
            }

            input.SkipBits(codeLength);
            return symbol;
        }
    }
}

// ---- InflaterManaged.cs ----
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

//
// zlib.h -- interface of the 'zlib' general purpose compression library
// version 1.2.1, November 17th, 2003
//
// Copyright (C) 1995-2003 Jean-loup Gailly and Mark Adler
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
+// +// + +using System; +using System.Diagnostics; +using System.IO; + +namespace SharpCompress.Compressors.Deflate64 +{ + internal sealed class InflaterManaged + { + // const tables used in decoding: + + // Extra bits for length code 257 - 285. + private static readonly byte[] S_EXTRA_LENGTH_BITS = + { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,16 }; + + // The base length for length code 257 - 285. + // The formula to get the real length for a length code is lengthBase[code - 257] + (value stored in extraBits) + private static readonly int[] S_LENGTH_BASE = + { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,3}; + + // The base distance for distance code 0 - 31 + // The real distance for a distance code is distanceBasePosition[code] + (value stored in extraBits) + private static readonly int[] S_DISTANCE_BASE_POSITION = + { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,32769,49153 }; + + // code lengths for code length alphabet is stored in following order + private static readonly byte[] S_CODE_ORDER = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; + + private static readonly byte[] S_STATIC_DISTANCE_TREE_TABLE = + { + 0x00,0x10,0x08,0x18,0x04,0x14,0x0c,0x1c,0x02,0x12,0x0a,0x1a, + 0x06,0x16,0x0e,0x1e,0x01,0x11,0x09,0x19,0x05,0x15,0x0d,0x1d, + 0x03,0x13,0x0b,0x1b,0x07,0x17,0x0f,0x1f + }; + + private readonly OutputWindow _output; + private readonly InputBuffer _input; + private HuffmanTree _literalLengthTree; + private HuffmanTree _distanceTree; + + private InflaterState _state; + //private bool _hasFormatReader; + private int _bfinal; + private BlockType _blockType; + + // uncompressed block + private readonly byte[] _blockLengthBuffer = new byte[4]; + private int _blockLength; + + // compressed block + private int _length; + private int _distanceCode; + private int _extraBits; + + private int _loopCounter; + private int 
_literalLengthCodeCount; + private int _distanceCodeCount; + private int _codeLengthCodeCount; + private int _codeArraySize; + private int _lengthCode; + + private readonly byte[] _codeList; // temporary array to store the code length for literal/Length and distance + private readonly byte[] _codeLengthTreeCodeLength; + private readonly bool _deflate64; + private HuffmanTree _codeLengthTree; + + //private IFileFormatReader _formatReader; // class to decode header and footer (e.g. gzip) + + internal InflaterManaged(/*IFileFormatReader reader, */bool deflate64) + { + _output = new OutputWindow(); + _input = new InputBuffer(); + + _codeList = new byte[HuffmanTree.MAX_LITERAL_TREE_ELEMENTS + HuffmanTree.MAX_DIST_TREE_ELEMENTS]; + _codeLengthTreeCodeLength = new byte[HuffmanTree.NUMBER_OF_CODE_LENGTH_TREE_ELEMENTS]; + _deflate64 = deflate64; + //if (reader != null) + //{ + // _formatReader = reader; + // _hasFormatReader = true; + //} + Reset(); + } + + private void Reset() + { + _state = //_hasFormatReader ? + //InflaterState.ReadingHeader : // start by reading Header info + InflaterState.ReadingBFinal; // start by reading BFinal bit + } + + public void SetInput(byte[] inputBytes, int offset, int length) => + _input.SetInput(inputBytes, offset, length); // append the bytes + + public bool Finished() => _state == InflaterState.Done || _state == InflaterState.VerifyingFooter; + + public int AvailableOutput => _output.AvailableBytes; + + public int Inflate(byte[] bytes, int offset, int length) + { + // copy bytes from output to outputbytes if we have available bytes + // if buffer is not filled up. keep decoding until no input are available + // if decodeBlock returns false. Throw an exception. 
+ int count = 0; + do + { + int copied = _output.CopyTo(bytes, offset, length); + if (copied > 0) + { + //if (_hasFormatReader) + //{ + // _formatReader.UpdateWithBytesRead(bytes, offset, copied); + //} + + offset += copied; + count += copied; + length -= copied; + } + + if (length == 0) + { // filled in the bytes array + break; + } + // Decode will return false when more input is needed + } while (!Finished() && Decode()); + + if (_state == InflaterState.VerifyingFooter) + { // finished reading CRC + // In this case finished is true and output window has all the data. + // But some data in output window might not be copied out. + if (_output.AvailableBytes == 0) + { + //_formatReader.Validate(); + } + } + + return count; + } + + //Each block of compressed data begins with 3 header bits + // containing the following data: + // first bit BFINAL + // next 2 bits BTYPE + // Note that the header bits do not necessarily begin on a byte + // boundary, since a block does not necessarily occupy an integral + // number of bytes. + // BFINAL is set if and only if this is the last block of the data + // set. + // BTYPE specifies how the data are compressed, as follows: + // 00 - no compression + // 01 - compressed with fixed Huffman codes + // 10 - compressed with dynamic Huffman codes + // 11 - reserved (error) + // The only difference between the two compressed cases is how the + // Huffman codes for the literal/length and distance alphabets are + // defined. 
+ // + // This function returns true for success (end of block or output window is full,) + // false if we are short of input + // + private bool Decode() + { + bool eob = false; + bool result = false; + + if (Finished()) + { + return true; + } + + //if (_hasFormatReader) + //{ + // if (_state == InflaterState.ReadingHeader) + // { + // if (!_formatReader.ReadHeader(_input)) + // { + // return false; + // } + // _state = InflaterState.ReadingBFinal; + // } + // else if (_state == InflaterState.StartReadingFooter || _state == InflaterState.ReadingFooter) + // { + // if (!_formatReader.ReadFooter(_input)) + // return false; + + // _state = InflaterState.VerifyingFooter; + // return true; + // } + //} + + if (_state == InflaterState.ReadingBFinal) + { + // reading bfinal bit + // Need 1 bit + if (!_input.EnsureBitsAvailable(1)) + return false; + + _bfinal = _input.GetBits(1); + _state = InflaterState.ReadingBType; + } + + if (_state == InflaterState.ReadingBType) + { + // Need 2 bits + if (!_input.EnsureBitsAvailable(2)) + { + _state = InflaterState.ReadingBType; + return false; + } + + _blockType = (BlockType)_input.GetBits(2); + if (_blockType == BlockType.Dynamic) + { + _state = InflaterState.ReadingNumLitCodes; + } + else if (_blockType == BlockType.Static) + { + _literalLengthTree = HuffmanTree.StaticLiteralLengthTree; + _distanceTree = HuffmanTree.StaticDistanceTree; + _state = InflaterState.DecodeTop; + } + else if (_blockType == BlockType.Uncompressed) + { + _state = InflaterState.UncompressedAligning; + } + else + { + throw new InvalidDataException("Deflate64: unknown block type"); + } + } + + if (_blockType == BlockType.Dynamic) + { + if (_state < InflaterState.DecodeTop) + { + // we are reading the header + result = DecodeDynamicBlockHeader(); + } + else + { + result = DecodeBlock(out eob); // this can returns true when output is full + } + } + else if (_blockType == BlockType.Static) + { + result = DecodeBlock(out eob); + } + else if (_blockType == 
BlockType.Uncompressed) + { + result = DecodeUncompressedBlock(out eob); + } + else + { + throw new InvalidDataException("Deflate64: unknown block type"); + } + + // + // If we reached the end of the block and the block we were decoding had + // bfinal=1 (final block) + // + if (eob && (_bfinal != 0)) + { + //if (_hasFormatReader) + // _state = InflaterState.StartReadingFooter; + //else + _state = InflaterState.Done; + } + return result; + } + + + // Format of Non-compressed blocks (BTYPE=00): + // + // Any bits of input up to the next byte boundary are ignored. + // The rest of the block consists of the following information: + // + // 0 1 2 3 4... + // +---+---+---+---+================================+ + // | LEN | NLEN |... LEN bytes of literal data...| + // +---+---+---+---+================================+ + // + // LEN is the number of data bytes in the block. NLEN is the + // one's complement of LEN. + private bool DecodeUncompressedBlock(out bool endOfBlock) + { + endOfBlock = false; + while (true) + { + switch (_state) + { + case InflaterState.UncompressedAligning: // initial state when calling this function + // we must skip to a byte boundary + _input.SkipToByteBoundary(); + _state = InflaterState.UncompressedByte1; + goto case InflaterState.UncompressedByte1; + + case InflaterState.UncompressedByte1: // decoding block length + case InflaterState.UncompressedByte2: + case InflaterState.UncompressedByte3: + case InflaterState.UncompressedByte4: + int bits = _input.GetBits(8); + if (bits < 0) + { + return false; + } + + _blockLengthBuffer[_state - InflaterState.UncompressedByte1] = (byte)bits; + if (_state == InflaterState.UncompressedByte4) + { + _blockLength = _blockLengthBuffer[0] + ((int)_blockLengthBuffer[1]) * 256; + int blockLengthComplement = _blockLengthBuffer[2] + ((int)_blockLengthBuffer[3]) * 256; + + // make sure complement matches + if ((ushort)_blockLength != (ushort)(~blockLengthComplement)) + { + throw new InvalidDataException("Deflate64: 
invalid block length"); + } + } + + _state += 1; + break; + + case InflaterState.DecodingUncompressed: // copying block data + + // Directly copy bytes from input to output. + int bytesCopied = _output.CopyFrom(_input, _blockLength); + _blockLength -= bytesCopied; + + if (_blockLength == 0) + { + // Done with this block, need to re-init bit buffer for next block + _state = InflaterState.ReadingBFinal; + endOfBlock = true; + return true; + } + + // We can fail to copy all bytes for two reasons: + // Running out of Input + // running out of free space in output window + if (_output.FreeBytes == 0) + { + return true; + } + + return false; + + default: + Debug./*Fail*/Assert(false, "check why we are here!"); + throw new InvalidDataException("Deflate64: unknown state"); + } + } + } + + private bool DecodeBlock(out bool endOfBlockCodeSeen) + { + endOfBlockCodeSeen = false; + + int freeBytes = _output.FreeBytes; // it is a little bit faster than frequently accessing the property + while (freeBytes > 65536) + { + // With Deflate64 we can have up to a 64kb length, so we ensure at least that much space is available + // in the OutputWindow to avoid overwriting previous unflushed output data. + + int symbol; + switch (_state) + { + case InflaterState.DecodeTop: + // decode an element from the literal tree + + // TODO: optimize this!!! 
+ symbol = _literalLengthTree.GetNextSymbol(_input); + if (symbol < 0) + { + // running out of input + return false; + } + + if (symbol < 256) + { + // literal + _output.Write((byte)symbol); + --freeBytes; + } + else if (symbol == 256) + { + // end of block + endOfBlockCodeSeen = true; + // Reset state + _state = InflaterState.ReadingBFinal; + return true; + } + else + { + // length/distance pair + symbol -= 257; // length code started at 257 + if (symbol < 8) + { + symbol += 3; // match length = 3,4,5,6,7,8,9,10 + _extraBits = 0; + } + else if (!_deflate64 && symbol == 28) + { + // extra bits for code 285 is 0 + symbol = 258; // code 285 means length 258 + _extraBits = 0; + } + else + { + if (symbol < 0 || symbol >= S_EXTRA_LENGTH_BITS.Length) + { + throw new InvalidDataException("Deflate64: invalid data"); + } + _extraBits = S_EXTRA_LENGTH_BITS[symbol]; + Debug.Assert(_extraBits != 0, "We handle other cases separately!"); + } + _length = symbol; + goto case InflaterState.HaveInitialLength; + } + break; + + case InflaterState.HaveInitialLength: + if (_extraBits > 0) + { + _state = InflaterState.HaveInitialLength; + int bits = _input.GetBits(_extraBits); + if (bits < 0) + { + return false; + } + + if (_length < 0 || _length >= S_LENGTH_BASE.Length) + { + throw new InvalidDataException("Deflate64: invalid data"); + } + _length = S_LENGTH_BASE[_length] + bits; + } + _state = InflaterState.HaveFullLength; + goto case InflaterState.HaveFullLength; + + case InflaterState.HaveFullLength: + if (_blockType == BlockType.Dynamic) + { + _distanceCode = _distanceTree.GetNextSymbol(_input); + } + else + { + // get distance code directly for static block + _distanceCode = _input.GetBits(5); + if (_distanceCode >= 0) + { + _distanceCode = S_STATIC_DISTANCE_TREE_TABLE[_distanceCode]; + } + } + + if (_distanceCode < 0) + { + // running out input + return false; + } + + _state = InflaterState.HaveDistCode; + goto case InflaterState.HaveDistCode; + + case InflaterState.HaveDistCode: 
+ // To avoid a table lookup we note that for distanceCode > 3, + // extra_bits = (distanceCode-2) >> 1 + int offset; + if (_distanceCode > 3) + { + _extraBits = (_distanceCode - 2) >> 1; + int bits = _input.GetBits(_extraBits); + if (bits < 0) + { + return false; + } + offset = S_DISTANCE_BASE_POSITION[_distanceCode] + bits; + } + else + { + offset = _distanceCode + 1; + } + + _output.WriteLengthDistance(_length, offset); + freeBytes -= _length; + _state = InflaterState.DecodeTop; + break; + + default: + Debug./*Fail*/Assert(false, "check why we are here!"); + throw new InvalidDataException("Deflate64: unknown state"); + } + } + + return true; + } + + + // Format of the dynamic block header: + // 5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286) + // 5 Bits: HDIST, # of Distance codes - 1 (1 - 32) + // 4 Bits: HCLEN, # of Code Length codes - 4 (4 - 19) + // + // (HCLEN + 4) x 3 bits: code lengths for the code length + // alphabet given just above, in the order: 16, 17, 18, + // 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 + // + // These code lengths are interpreted as 3-bit integers + // (0-7); as above, a code length of 0 means the + // corresponding symbol (literal/length or distance code + // length) is not used. + // + // HLIT + 257 code lengths for the literal/length alphabet, + // encoded using the code length Huffman code + // + // HDIST + 1 code lengths for the distance alphabet, + // encoded using the code length Huffman code + // + // The code length repeat codes can cross from HLIT + 257 to the + // HDIST + 1 code lengths. In other words, all code lengths form + // a single sequence of HLIT + HDIST + 258 values. 
+ private bool DecodeDynamicBlockHeader() + { + switch (_state) + { + case InflaterState.ReadingNumLitCodes: + _literalLengthCodeCount = _input.GetBits(5); + if (_literalLengthCodeCount < 0) + { + return false; + } + _literalLengthCodeCount += 257; + _state = InflaterState.ReadingNumDistCodes; + goto case InflaterState.ReadingNumDistCodes; + + case InflaterState.ReadingNumDistCodes: + _distanceCodeCount = _input.GetBits(5); + if (_distanceCodeCount < 0) + { + return false; + } + _distanceCodeCount += 1; + _state = InflaterState.ReadingNumCodeLengthCodes; + goto case InflaterState.ReadingNumCodeLengthCodes; + + case InflaterState.ReadingNumCodeLengthCodes: + _codeLengthCodeCount = _input.GetBits(4); + if (_codeLengthCodeCount < 0) + { + return false; + } + _codeLengthCodeCount += 4; + _loopCounter = 0; + _state = InflaterState.ReadingCodeLengthCodes; + goto case InflaterState.ReadingCodeLengthCodes; + + case InflaterState.ReadingCodeLengthCodes: + while (_loopCounter < _codeLengthCodeCount) + { + int bits = _input.GetBits(3); + if (bits < 0) + { + return false; + } + _codeLengthTreeCodeLength[S_CODE_ORDER[_loopCounter]] = (byte)bits; + ++_loopCounter; + } + + for (int i = _codeLengthCodeCount; i < S_CODE_ORDER.Length; i++) + { + _codeLengthTreeCodeLength[S_CODE_ORDER[i]] = 0; + } + + // create huffman tree for code length + _codeLengthTree = new HuffmanTree(_codeLengthTreeCodeLength); + _codeArraySize = _literalLengthCodeCount + _distanceCodeCount; + _loopCounter = 0; // reset loop count + + _state = InflaterState.ReadingTreeCodesBefore; + goto case InflaterState.ReadingTreeCodesBefore; + + case InflaterState.ReadingTreeCodesBefore: + case InflaterState.ReadingTreeCodesAfter: + while (_loopCounter < _codeArraySize) + { + if (_state == InflaterState.ReadingTreeCodesBefore) + { + if ((_lengthCode = _codeLengthTree.GetNextSymbol(_input)) < 0) + { + return false; + } + } + + // The alphabet for code lengths is as follows: + // 0 - 15: Represent code lengths of 0 - 15 + 
// 16: Copy the previous code length 3 - 6 times. + // The next 2 bits indicate repeat length + // (0 = 3, ... , 3 = 6) + // Example: Codes 8, 16 (+2 bits 11), + // 16 (+2 bits 10) will expand to + // 12 code lengths of 8 (1 + 6 + 5) + // 17: Repeat a code length of 0 for 3 - 10 times. + // (3 bits of length) + // 18: Repeat a code length of 0 for 11 - 138 times + // (7 bits of length) + if (_lengthCode <= 15) + { + _codeList[_loopCounter++] = (byte)_lengthCode; + } + else + { + int repeatCount; + if (_lengthCode == 16) + { + if (!_input.EnsureBitsAvailable(2)) + { + _state = InflaterState.ReadingTreeCodesAfter; + return false; + } + + if (_loopCounter == 0) + { + // can't have "prev code" on first code + throw new InvalidDataException(); + } + + byte previousCode = _codeList[_loopCounter - 1]; + repeatCount = _input.GetBits(2) + 3; + + if (_loopCounter + repeatCount > _codeArraySize) + { + throw new InvalidDataException(); + } + + for (int j = 0; j < repeatCount; j++) + { + _codeList[_loopCounter++] = previousCode; + } + } + else if (_lengthCode == 17) + { + if (!_input.EnsureBitsAvailable(3)) + { + _state = InflaterState.ReadingTreeCodesAfter; + return false; + } + + repeatCount = _input.GetBits(3) + 3; + + if (_loopCounter + repeatCount > _codeArraySize) + { + throw new InvalidDataException(); + } + + for (int j = 0; j < repeatCount; j++) + { + _codeList[_loopCounter++] = 0; + } + } + else + { + // code == 18 + if (!_input.EnsureBitsAvailable(7)) + { + _state = InflaterState.ReadingTreeCodesAfter; + return false; + } + + repeatCount = _input.GetBits(7) + 11; + + if (_loopCounter + repeatCount > _codeArraySize) + { + throw new InvalidDataException(); + } + + for (int j = 0; j < repeatCount; j++) + { + _codeList[_loopCounter++] = 0; + } + } + } + _state = InflaterState.ReadingTreeCodesBefore; // we want to read the next code. 
+ } + break; + + default: + Debug./*Fail*/Assert(false, "check why we are here!"); + throw new InvalidDataException("Deflate64: unknown state"); + } + + byte[] literalTreeCodeLength = new byte[HuffmanTree.MAX_LITERAL_TREE_ELEMENTS]; + byte[] distanceTreeCodeLength = new byte[HuffmanTree.MAX_DIST_TREE_ELEMENTS]; + + // Create literal and distance tables + Array.Copy(_codeList, 0, literalTreeCodeLength, 0, _literalLengthCodeCount); + Array.Copy(_codeList, _literalLengthCodeCount, distanceTreeCodeLength, 0, _distanceCodeCount); + + // Make sure there is an end-of-block code, otherwise how could we ever end? + if (literalTreeCodeLength[HuffmanTree.END_OF_BLOCK_CODE] == 0) + { + throw new InvalidDataException(); + } + + _literalLengthTree = new HuffmanTree(literalTreeCodeLength); + _distanceTree = new HuffmanTree(distanceTreeCodeLength); + _state = InflaterState.DecodeTop; + return true; + } + + public void Dispose() { } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/InflaterState.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/InflaterState.cs new file mode 100644 index 0000000000..356ea88f43 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/InflaterState.cs @@ -0,0 +1,42 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +namespace SharpCompress.Compressors.Deflate64 +{ + // Do not rearrange the enum values. 
    // State machine positions for InflaterManaged.
    // Do not rearrange or renumber: the decoder relies on the numeric values —
    // DecodeUncompressedBlock indexes its length buffer with
    // (_state - UncompressedByte1) and advances with `_state += 1`, and the
    // dynamic-header path compares with `_state < InflaterState.DecodeTop`.
    internal enum InflaterState
    {
        ReadingHeader = 0, // Only applies to GZIP

        ReadingBFinal = 2, // About to read bfinal bit
        ReadingBType = 3, // About to read blockType bits

        ReadingNumLitCodes = 4, // About to read # literal codes
        ReadingNumDistCodes = 5, // About to read # dist codes
        ReadingNumCodeLengthCodes = 6, // About to read # code length codes
        ReadingCodeLengthCodes = 7, // In the middle of reading the code length codes
        ReadingTreeCodesBefore = 8, // In the middle of reading tree codes (loop top)
        ReadingTreeCodesAfter = 9, // In the middle of reading tree codes (extension; code > 15)

        DecodeTop = 10, // About to decode a literal (char/match) in a compressed block
        HaveInitialLength = 11, // Decoding a match, have the literal code (base length)
        HaveFullLength = 12, // Ditto, now have the full match length (incl. extra length bits)
        HaveDistCode = 13, // Ditto, now have the distance code also, need extra dist bits

        /* uncompressed blocks — values 16..19 are indexed arithmetically, keep contiguous */
        UncompressedAligning = 15,
        UncompressedByte1 = 16,
        UncompressedByte2 = 17,
        UncompressedByte3 = 18,
        UncompressedByte4 = 19,
        DecodingUncompressed = 20,

        // These three apply only to GZIP
        StartReadingFooter = 21, // (Initialisation for reading footer)
        ReadingFooter = 22,
        VerifyingFooter = 23,

        Done = 24 // Finished
    }
+ +using System; +using System.Diagnostics; + +namespace SharpCompress.Compressors.Deflate64 +{ + // This class can be used to read bits from an byte array quickly. + // Normally we get bits from 'bitBuffer' field and bitsInBuffer stores + // the number of bits available in 'BitBuffer'. + // When we used up the bits in bitBuffer, we will try to get byte from + // the byte array and copy the byte to appropiate position in bitBuffer. + // + // The byte array is not reused. We will go from 'start' to 'end'. + // When we reach the end, most read operations will return -1, + // which means we are running out of input. + + internal sealed class InputBuffer + { + private byte[] _buffer; // byte array to store input + private int _start; // start poisition of the buffer + private int _end; // end position of the buffer + private uint _bitBuffer = 0; // store the bits here, we can quickly shift in this buffer + private int _bitsInBuffer = 0; // number of bits available in bitBuffer + + /// Total bits available in the input buffer. + public int AvailableBits => _bitsInBuffer; + + /// Total bytes available in the input buffer. + public int AvailableBytes => (_end - _start) + (_bitsInBuffer / 8); + + /// Ensure that count bits are in the bit buffer. + /// Can be up to 16. + /// Returns false if input is not sufficient to make this true. + public bool EnsureBitsAvailable(int count) + { + Debug.Assert(0 < count && count <= 16, "count is invalid."); + + // manual inlining to improve perf + if (_bitsInBuffer < count) + { + if (NeedsInput()) + { + return false; + } + // insert a byte to bitbuffer + _bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer; + _bitsInBuffer += 8; + + if (_bitsInBuffer < count) + { + if (NeedsInput()) + { + return false; + } + // insert a byte to bitbuffer + _bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer; + _bitsInBuffer += 8; + } + } + + return true; + } + + /// + /// This function will try to load 16 or more bits into bitBuffer. 
+ /// It returns whatever is contained in bitBuffer after loading. + /// The main difference between this and GetBits is that this will + /// never return -1. So the caller needs to check AvailableBits to + /// see how many bits are available. + /// + public uint TryLoad16Bits() + { + if (_bitsInBuffer < 8) + { + if (_start < _end) + { + _bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer; + _bitsInBuffer += 8; + } + + if (_start < _end) + { + _bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer; + _bitsInBuffer += 8; + } + } + else if (_bitsInBuffer < 16) + { + if (_start < _end) + { + _bitBuffer |= (uint)_buffer[_start++] << _bitsInBuffer; + _bitsInBuffer += 8; + } + } + + return _bitBuffer; + } + + private uint GetBitMask(int count) => ((uint)1 << count) - 1; + + /// Gets count bits from the input buffer. Returns -1 if not enough bits available. + public int GetBits(int count) + { + Debug.Assert(0 < count && count <= 16, "count is invalid."); + + if (!EnsureBitsAvailable(count)) + { + return -1; + } + + int result = (int)(_bitBuffer & GetBitMask(count)); + _bitBuffer >>= count; + _bitsInBuffer -= count; + return result; + } + + /// + /// Copies length bytes from input buffer to output buffer starting at output[offset]. + /// You have to make sure, that the buffer is byte aligned. If not enough bytes are + /// available, copies fewer bytes. + /// + /// Returns the number of bytes copied, 0 if no byte is available. + public int CopyTo(byte[] output, int offset, int length) + { + Debug.Assert(output != null); + Debug.Assert(offset >= 0); + Debug.Assert(length >= 0); + Debug.Assert(offset <= output.Length - length); + Debug.Assert((_bitsInBuffer % 8) == 0); + + // Copy the bytes in bitBuffer first. 
+ int bytesFromBitBuffer = 0; + while (_bitsInBuffer > 0 && length > 0) + { + output[offset++] = (byte)_bitBuffer; + _bitBuffer >>= 8; + _bitsInBuffer -= 8; + length--; + bytesFromBitBuffer++; + } + + if (length == 0) + { + return bytesFromBitBuffer; + } + + int avail = _end - _start; + if (length > avail) + { + length = avail; + } + + Array.Copy(_buffer, _start, output, offset, length); + _start += length; + return bytesFromBitBuffer + length; + } + + /// + /// Return true is all input bytes are used. + /// This means the caller can call SetInput to add more input. + /// + public bool NeedsInput() => _start == _end; + + /// + /// Set the byte array to be processed. + /// All the bits remained in bitBuffer will be processed before the new bytes. + /// We don't clone the byte array here since it is expensive. + /// The caller should make sure after a buffer is passed in. + /// It will not be changed before calling this function again. + /// + public void SetInput(byte[] buffer, int offset, int length) + { + Debug.Assert(buffer != null); + Debug.Assert(offset >= 0); + Debug.Assert(length >= 0); + Debug.Assert(offset <= buffer.Length - length); + Debug.Assert(_start == _end); + + _buffer = buffer; + _start = offset; + _end = offset + length; + } + + /// Skip n bits in the buffer. + public void SkipBits(int n) + { + Debug.Assert(_bitsInBuffer >= n, "No enough bits in the buffer, Did you call EnsureBitsAvailable?"); + _bitBuffer >>= n; + _bitsInBuffer -= n; + } + + /// Skips to the next byte boundary. 
+ public void SkipToByteBoundary() + { + _bitBuffer >>= (_bitsInBuffer % 8); + _bitsInBuffer = _bitsInBuffer - (_bitsInBuffer % 8); + } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Match.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Match.cs new file mode 100644 index 0000000000..4d5ce54bf5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/Match.cs @@ -0,0 +1,17 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +namespace SharpCompress.Compressors.Deflate64 +{ + /// + /// This class represents a match in the history window. + /// + internal sealed class Match + { + internal MatchState State { get; set; } + internal int Position { get; set; } + internal int Length { get; set; } + internal byte Symbol { get; set; } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/MatchState.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/MatchState.cs new file mode 100644 index 0000000000..f88913bcf7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Deflate64/MatchState.cs @@ -0,0 +1,13 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
    /// <summary>
    /// This class maintains a window for decompressed output.
    /// We need to keep this because the decompressed information can be
    /// a literal or a length/distance pair. For length/distance pair,
    /// we need to look back in the output window and copy bytes from there.
    /// We use a byte array of WindowSize circularly.
    /// </summary>
    internal sealed class OutputWindow
    {
        // With Deflate64 we can have up to a 65536 length as well as up to a 65538 distance. This means we need a Window that is at
        // least 131074 bytes long so we have space to retrieve up to a full 64kb in lookback and place it in our buffer without
        // overwriting existing data. OutputWindow requires that the WindowSize be an exponent of 2, so we round up to 2^18.
        private const int WINDOW_SIZE = 262144;
        private const int WINDOW_MASK = 262143;

        private readonly byte[] _window = new byte[WINDOW_SIZE]; // The window is 2^18 bytes
        private int _end; // this is the position to where we should write next byte
        private int _bytesUsed; // The number of bytes in the output window which is not consumed.

        /// <summary>Add a byte to the output window.</summary>
        public void Write(byte b)
        {
            Debug.Assert(_bytesUsed < WINDOW_SIZE, "Can't add byte when window is full!");
            _window[_end++] = b;
            // wrap the write cursor circularly
            _end &= WINDOW_MASK;
            ++_bytesUsed;
        }

        /// <summary>
        /// Moves backwards <paramref name="distance"/> bytes in the output stream
        /// and copies <paramref name="length"/> bytes from that position to the
        /// output stream (a DEFLATE length/distance back-reference).
        /// </summary>
        public void WriteLengthDistance(int length, int distance)
        {
            Debug.Assert((_bytesUsed + length) <= WINDOW_SIZE, "No Enough space");

            // move backwards distance bytes in the output stream,
            // and copy length bytes from this position to the output stream.
            _bytesUsed += length;
            int copyStart = (_end - distance) & WINDOW_MASK; // start position for copying

            int border = WINDOW_SIZE - length;
            if (copyStart <= border && _end < border)
            {
                // Neither source nor destination run off the end of the window,
                // so no per-byte wrap masking is needed.
                if (length <= distance)
                {
                    // Non-overlapping ranges: a single bulk copy is safe.
                    Array.Copy(_window, copyStart, _window, _end, length);
                    _end += length;
                }
                else
                {
                    // The referenced string may overlap the current
                    // position; for example, if the last 2 bytes decoded have values
                    // X and Y, a string reference with <length=5, distance=2>
                    // adds X,Y,X,Y,X to the output stream, so copy byte by byte.
                    while (length-- > 0)
                    {
                        _window[_end++] = _window[copyStart++];
                    }
                }
            }
            else
            {
                // At least one cursor may wrap past the window end:
                // copy byte by byte, masking both cursors each step.
                while (length-- > 0)
                {
                    _window[_end++] = _window[copyStart++];
                    _end &= WINDOW_MASK;
                    copyStart &= WINDOW_MASK;
                }
            }
        }

        /// <summary>
        /// Copy up to <paramref name="length"/> bytes from input directly.
        /// This is used for uncompressed blocks.
        /// </summary>
        /// <returns>The number of bytes actually copied (limited by free window
        /// space and by how many bytes the input currently holds).</returns>
        public int CopyFrom(InputBuffer input, int length)
        {
            length = Math.Min(Math.Min(length, WINDOW_SIZE - _bytesUsed), input.AvailableBytes);
            int copied;

            // We might need wrap around to copy all bytes.
            int tailLen = WINDOW_SIZE - _end;
            if (length > tailLen)
            {
                // copy the first part, up to the physical end of the window
                copied = input.CopyTo(_window, _end, tailLen);
                if (copied == tailLen)
                {
                    // only try to copy the second part if we have enough bytes in input
                    copied += input.CopyTo(_window, 0, length - tailLen);
                }
            }
            else
            {
                // only one copy is needed if there is no wrap around.
                copied = input.CopyTo(_window, _end, length);
            }

            _end = (_end + copied) & WINDOW_MASK;
            _bytesUsed += copied;
            return copied;
        }

        /// <summary>Free space in the output window.</summary>
        public int FreeBytes => WINDOW_SIZE - _bytesUsed;

        /// <summary>Bytes not yet consumed from the output window.</summary>
        public int AvailableBytes => _bytesUsed;

        /// <summary>
        /// Copies up to <paramref name="length"/> decompressed bytes (oldest
        /// first) into <paramref name="output"/>, consuming them from the window.
        /// </summary>
        /// <returns>The number of bytes copied.</returns>
        public int CopyTo(byte[] output, int offset, int length)
        {
            int copyEnd;

            if (length > _bytesUsed)
            {
                // we can copy all the decompressed bytes out
                copyEnd = _end;
                length = _bytesUsed;
            }
            else
            {
                // stop `length` bytes past the oldest unconsumed byte
                copyEnd = (_end - _bytesUsed + length) & WINDOW_MASK; // copy length of bytes
            }

            int copied = length;

            int tailLen = length - copyEnd;
            if (tailLen > 0)
            {
                // this means we need to copy two parts separately
                // copy tailLen bytes from the end of the output window
                Array.Copy(_window, WINDOW_SIZE - tailLen,
                           output, offset, tailLen);
                offset += tailLen;
                length = copyEnd;
            }
            Array.Copy(_window, copyEnd - length, output, offset, length);
            _bytesUsed -= copied;
            Debug.Assert(_bytesUsed >= 0, "check this function and find why we copied more bytes than we have");
            return copied;
        }
    }
byte[] _data1; + private readonly byte[] _data2; + + private int _controlPos; + private int _data1Pos; + private int _data2Pos; + + private readonly ushort[] _p = new ushort[256 + 2]; + private uint _range, _code; + private byte _prevByte; + private bool _isDisposed; + + private const int K_NUM_TOP_BITS = 24; + private const int K_TOP_VALUE = 1 << K_NUM_TOP_BITS; + + private const int K_NUM_BIT_MODEL_TOTAL_BITS = 11; + private const int K_BIT_MODEL_TOTAL = 1 << K_NUM_BIT_MODEL_TOTAL_BITS; + private const int K_NUM_MOVE_BITS = 5; + + private static bool IsJ(byte b0, byte b1) + { + return (b1 & 0xFE) == 0xE8 || IsJcc(b0, b1); + } + + private static bool IsJcc(byte b0, byte b1) + { + return b0 == 0x0F && (b1 & 0xF0) == 0x80; + } + + public BCJ2Filter(byte[] control, byte[] data1, byte[] data2, Stream baseStream) + { + _control = control; + _data1 = data1; + _data2 = data2; + _baseStream = baseStream; + + int i; + for (i = 0; i < _p.Length; i++) + { + _p[i] = K_BIT_MODEL_TOTAL >> 1; + } + + _code = 0; + _range = 0xFFFFFFFF; + for (i = 0; i < 5; i++) + { + _code = (_code << 8) | control[_controlPos++]; + } + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + base.Dispose(disposing); + _baseStream.Dispose(); + } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => _baseStream.Length + _data1.Length + _data2.Length; + + public override long Position { get => _position; set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + int size = 0; + byte b = 0; + + while (!_endReached && size < count) + { + while (_outputOffset < _outputCount) + { + b = _output[_outputOffset++]; + buffer[offset++] = b; + size++; + _position++; + + _prevByte = b; + if (size == count) + { 
+ return size; + } + } + + if (_inputOffset == _inputCount) + { + _inputOffset = 0; + _inputCount = _baseStream.Read(_input, 0, _input.Length); + if (_inputCount == 0) + { + _endReached = true; + break; + } + } + + b = _input[_inputOffset++]; + buffer[offset++] = b; + size++; + _position++; + + if (!IsJ(_prevByte, b)) + { + _prevByte = b; + } + else + { + int prob; + if (b == 0xE8) + { + prob = _prevByte; + } + else if (b == 0xE9) + { + prob = 256; + } + else + { + prob = 257; + } + + uint bound = (_range >> K_NUM_BIT_MODEL_TOTAL_BITS) * _p[prob]; + if (_code < bound) + { + _range = bound; + _p[prob] += (ushort)((K_BIT_MODEL_TOTAL - _p[prob]) >> K_NUM_MOVE_BITS); + if (_range < K_TOP_VALUE) + { + _range <<= 8; + _code = (_code << 8) | _control[_controlPos++]; + } + _prevByte = b; + } + else + { + _range -= bound; + _code -= bound; + _p[prob] -= (ushort)(_p[prob] >> K_NUM_MOVE_BITS); + if (_range < K_TOP_VALUE) + { + _range <<= 8; + _code = (_code << 8) | _control[_controlPos++]; + } + + uint dest; + if (b == 0xE8) + { + dest = + (uint) + ((_data1[_data1Pos++] << 24) | (_data1[_data1Pos++] << 16) | (_data1[_data1Pos++] << 8) | + _data1[_data1Pos++]); + } + else + { + dest = + (uint) + ((_data2[_data2Pos++] << 24) | (_data2[_data2Pos++] << 16) | (_data2[_data2Pos++] << 8) | + _data2[_data2Pos++]); + } + dest -= (uint)(_position + 4); + + _output[0] = (byte)dest; + _output[1] = (byte)(dest >> 8); + _output[2] = (byte)(dest >> 16); + _output[3] = (byte)(dest >> 24); + _outputOffset = 0; + _outputCount = 4; + } + } + } + + return size; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Filters/BCJFilter.cs 
b/BizHawk.Client.Common/SharpCompress/Compressors/Filters/BCJFilter.cs new file mode 100644 index 0000000000..3caa492b8c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Filters/BCJFilter.cs @@ -0,0 +1,113 @@ +using System.IO; + +namespace SharpCompress.Compressors.Filters +{ + internal class BCJFilter : Filter + { + private static readonly bool[] MASK_TO_ALLOWED_STATUS = {true, true, true, false, true, false, false, false}; + + private static readonly int[] MASK_TO_BIT_NUMBER = {0, 1, 2, 2, 3, 3, 3, 3}; + + private int _pos; + private int _prevMask; + + public BCJFilter(bool isEncoder, Stream baseStream) + : base(isEncoder, baseStream, 5) + { + _pos = 5; + } + + private static bool Test86MsByte(byte b) + { + return b == 0x00 || b == 0xFF; + } + + protected override int Transform(byte[] buffer, int offset, int count) + { + int prevPos = offset - 1; + int end = offset + count - 5; + int i; + + for (i = offset; i <= end; ++i) + { + if ((buffer[i] & 0xFE) != 0xE8) + { + continue; + } + + prevPos = i - prevPos; + if ((prevPos & ~3) != 0) + { + // (unsigned)prevPos > 3 + _prevMask = 0; + } + else + { + _prevMask = (_prevMask << (prevPos - 1)) & 7; + if (_prevMask != 0) + { + if (!MASK_TO_ALLOWED_STATUS[_prevMask] || Test86MsByte( + buffer[i + 4 - MASK_TO_BIT_NUMBER[_prevMask]])) + { + prevPos = i; + _prevMask = (_prevMask << 1) | 1; + continue; + } + } + } + + prevPos = i; + + if (Test86MsByte(buffer[i + 4])) + { + int src = buffer[i + 1] + | (buffer[i + 2] << 8) + | (buffer[i + 3] << 16) + | (buffer[i + 4] << 24); + int dest; + while (true) + { + if (_isEncoder) + { + dest = src + (_pos + i - offset); + } + else + { + dest = src - (_pos + i - offset); + } + + if (_prevMask == 0) + { + break; + } + + int index = MASK_TO_BIT_NUMBER[_prevMask] * 8; + if (!Test86MsByte((byte)(dest >> (24 - index)))) + { + break; + } + + src = dest ^ ((1 << (32 - index)) - 1); + } + + buffer[i + 1] = (byte)dest; + buffer[i + 2] = (byte)(dest >> 8); + buffer[i + 3] = 
(byte)(dest >> 16); + buffer[i + 4] = (byte)(~(((dest >> 24) & 1) - 1)); + i += 4; + } + else + { + _prevMask = (_prevMask << 1) | 1; + } + } + + prevPos = i - prevPos; + _prevMask = ((prevPos & ~3) != 0) ? 0 : _prevMask << (prevPos - 1); + + i -= offset; + _pos += i; + return i; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Filters/Filter.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Filters/Filter.cs new file mode 100644 index 0000000000..b7fd815985 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Filters/Filter.cs @@ -0,0 +1,154 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.Filters +{ + internal abstract class Filter : Stream + { + protected bool _isEncoder; + protected Stream _baseStream; + + private readonly byte[] _tail; + private readonly byte[] _window; + private int _transformed; + private int _read; + private bool _endReached; + private bool _isDisposed; + + protected Filter(bool isEncoder, Stream baseStream, int lookahead) + { + _isEncoder = isEncoder; + _baseStream = baseStream; + _tail = new byte[lookahead - 1]; + _window = new byte[_tail.Length * 2]; + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + base.Dispose(disposing); + _baseStream.Dispose(); + } + + public override bool CanRead => !_isEncoder; + + public override bool CanSeek => false; + + public override bool CanWrite => _isEncoder; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => _baseStream.Length; + + public override long Position { get => _baseStream.Position; set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + int size = 0; + + if (_transformed > 0) + { + int copySize = _transformed; + if (copySize > count) + { + copySize = count; + } + Buffer.BlockCopy(_tail, 0, buffer, offset, 
copySize); + _transformed -= copySize; + _read -= copySize; + offset += copySize; + count -= copySize; + size += copySize; + Buffer.BlockCopy(_tail, copySize, _tail, 0, _read); + } + if (count == 0) + { + return size; + } + + int inSize = _read; + if (inSize > count) + { + inSize = count; + } + Buffer.BlockCopy(_tail, 0, buffer, offset, inSize); + _read -= inSize; + Buffer.BlockCopy(_tail, inSize, _tail, 0, _read); + while (!_endReached && inSize < count) + { + int baseRead = _baseStream.Read(buffer, offset + inSize, count - inSize); + inSize += baseRead; + if (baseRead == 0) + { + _endReached = true; + } + } + while (!_endReached && _read < _tail.Length) + { + int baseRead = _baseStream.Read(_tail, _read, _tail.Length - _read); + _read += baseRead; + if (baseRead == 0) + { + _endReached = true; + } + } + + if (inSize > _tail.Length) + { + _transformed = Transform(buffer, offset, inSize); + offset += _transformed; + count -= _transformed; + size += _transformed; + inSize -= _transformed; + _transformed = 0; + } + + if (count == 0) + { + return size; + } + + Buffer.BlockCopy(buffer, offset, _window, 0, inSize); + Buffer.BlockCopy(_tail, 0, _window, inSize, _read); + if (inSize + _read > _tail.Length) + { + _transformed = Transform(_window, 0, inSize + _read); + } + else + { + _transformed = inSize + _read; + } + Buffer.BlockCopy(_window, 0, buffer, offset, inSize); + Buffer.BlockCopy(_window, inSize, _tail, 0, _read); + size += inSize; + _transformed -= inSize; + + return size; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + Transform(buffer, offset, count); + _baseStream.Write(buffer, offset, count); + } + + protected abstract int Transform(byte[] buffer, int offset, int count); + } +} \ No newline at end of file diff --git 
a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/AesDecoderStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/AesDecoderStream.cs new file mode 100644 index 0000000000..744336df5d --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/AesDecoderStream.cs @@ -0,0 +1,266 @@ + +#if !NO_CRYPTO +using System; +using System.IO; +using System.Security.Cryptography; +using System.Text; +using SharpCompress.Compressors.LZMA.Utilites; + +namespace SharpCompress.Compressors.LZMA +{ + internal class AesDecoderStream : DecoderStream2 + { + #region Variables + + private readonly Stream mStream; + private readonly ICryptoTransform mDecoder; + private readonly byte[] mBuffer; + private long mWritten; + private readonly long mLimit; + private int mOffset; + private int mEnding; + private int mUnderflow; + private bool isDisposed; + + #endregion + + #region Stream Methods + + public AesDecoderStream(Stream input, byte[] info, IPasswordProvider pass, long limit) + { + mStream = input; + mLimit = limit; + + if (((uint) input.Length & 15) != 0) + throw new NotSupportedException("AES decoder does not support padding."); + + int numCyclesPower; + byte[] salt, seed; + Init(info, out numCyclesPower, out salt, out seed); + + byte[] password = Encoding.Unicode.GetBytes(pass.CryptoGetTextPassword()); + byte[] key = InitKey(numCyclesPower, salt, password); + + using (var aes = Aes.Create()) + { + aes.Mode = CipherMode.CBC; + aes.Padding = PaddingMode.None; + mDecoder = aes.CreateDecryptor(key, seed); + } + + mBuffer = new byte[4 << 10]; + } + + protected override void Dispose(bool disposing) + { + try + { + if (isDisposed) + { + return; + } + isDisposed = true; + if (disposing) + { + mStream.Dispose(); + mDecoder.Dispose(); + } + } + finally + { + base.Dispose(disposing); + } + } + + public override long Position + { + get + { + return mWritten; + } + } + + public override long Length + { + get + { + return mLimit; + } + } + + public override int 
Read(byte[] buffer, int offset, int count) + { + if (count == 0 + || mWritten == mLimit) + return 0; + + if (mUnderflow > 0) + return HandleUnderflow(buffer, offset, count); + + // Need at least 16 bytes to proceed. + if (mEnding - mOffset < 16) + { + Buffer.BlockCopy(mBuffer, mOffset, mBuffer, 0, mEnding - mOffset); + mEnding -= mOffset; + mOffset = 0; + + do + { + int read = mStream.Read(mBuffer, mEnding, mBuffer.Length - mEnding); + if (read == 0) + { + // We are not done decoding and have less than 16 bytes. + throw new EndOfStreamException(); + } + + mEnding += read; + } + while (mEnding - mOffset < 16); + } + + // We shouldn't return more data than we are limited to. + // Currently this is handled by forcing an underflow if + // the stream length is not a multiple of the block size. + if (count > mLimit - mWritten) + count = (int) (mLimit - mWritten); + + // We cannot transform less than 16 bytes into the target buffer, + // but we also cannot return zero, so we need to handle this. + // We transform the data locally and use our own buffer as cache. + if (count < 16) + return HandleUnderflow(buffer, offset, count); + + if (count > mEnding - mOffset) + count = mEnding - mOffset; + + // Otherwise we transform directly into the target buffer. 
+ int processed = mDecoder.TransformBlock(mBuffer, mOffset, count & ~15, buffer, offset); + mOffset += processed; + mWritten += processed; + return processed; + } + + #endregion + + #region Private Methods + + private void Init(byte[] info, out int numCyclesPower, out byte[] salt, out byte[] iv) + { + byte bt = info[0]; + numCyclesPower = bt & 0x3F; + + if ((bt & 0xC0) == 0) + { + salt = new byte[0]; + iv = new byte[0]; + return; + } + + int saltSize = (bt >> 7) & 1; + int ivSize = (bt >> 6) & 1; + if (info.Length == 1) + throw new InvalidOperationException(); + + byte bt2 = info[1]; + saltSize += (bt2 >> 4); + ivSize += (bt2 & 15); + if (info.Length < 2 + saltSize + ivSize) + throw new InvalidOperationException(); + + salt = new byte[saltSize]; + for (int i = 0; i < saltSize; i++) + salt[i] = info[i + 2]; + + iv = new byte[16]; + for (int i = 0; i < ivSize; i++) + iv[i] = info[i + saltSize + 2]; + + if (numCyclesPower > 24) + throw new NotSupportedException(); + } + + private byte[] InitKey(int mNumCyclesPower, byte[] salt, byte[] pass) + { + if (mNumCyclesPower == 0x3F) + { + var key = new byte[32]; + + int pos; + for (pos = 0; pos < salt.Length; pos++) + key[pos] = salt[pos]; + for (int i = 0; i < pass.Length && pos < 32; i++) + key[pos++] = pass[i]; + + return key; + } + else + { +#if NETSTANDARD1_3 + using (IncrementalHash sha = IncrementalHash.CreateHash(HashAlgorithmName.SHA256)) + { + byte[] counter = new byte[8]; + long numRounds = 1L << mNumCyclesPower; + for (long round = 0; round < numRounds; round++) + { + sha.AppendData(salt, 0, salt.Length); + sha.AppendData(pass, 0, pass.Length); + sha.AppendData(counter, 0, 8); + + // This mirrors the counter so we don't have to convert long to byte[] each round. + // (It also ensures the counter is little endian, which BitConverter does not.) 
+ for (int i = 0; i < 8; i++) + if (++counter[i] != 0) + break; + } + return sha.GetHashAndReset(); + } +#else + using (var sha = SHA256.Create()) + { + byte[] counter = new byte[8]; + long numRounds = 1L << mNumCyclesPower; + for (long round = 0; round < numRounds; round++) + { + sha.TransformBlock(salt, 0, salt.Length, null, 0); + sha.TransformBlock(pass, 0, pass.Length, null, 0); + sha.TransformBlock(counter, 0, 8, null, 0); + + // This mirrors the counter so we don't have to convert long to byte[] each round. + // (It also ensures the counter is little endian, which BitConverter does not.) + for (int i = 0; i < 8; i++) + if (++counter[i] != 0) + break; + } + + sha.TransformFinalBlock(counter, 0, 0); + return sha.Hash; + } +#endif + } + } + + private int HandleUnderflow(byte[] buffer, int offset, int count) + { + // If this is zero we were called to create a new underflow buffer. + // Just transform as much as possible so we can feed from it as long as possible. + if (mUnderflow == 0) + { + int blockSize = (mEnding - mOffset) & ~15; + mUnderflow = mDecoder.TransformBlock(mBuffer, mOffset, blockSize, mBuffer, mOffset); + } + + if (count > mUnderflow) + count = mUnderflow; + + Buffer.BlockCopy(mBuffer, mOffset, buffer, offset, count); + mWritten += count; + mOffset += count; + mUnderflow -= count; + return count; + } + + #endregion + } +} + +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Bcj2DecoderStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Bcj2DecoderStream.cs new file mode 100644 index 0000000000..4bdac69b5e --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Bcj2DecoderStream.cs @@ -0,0 +1,283 @@ +using System; +using System.Collections.Generic; +using System.IO; + +namespace SharpCompress.Compressors.LZMA +{ + internal class Bcj2DecoderStream : DecoderStream2 + { + private const int K_NUM_TOP_BITS = 24; + private const uint K_TOP_VALUE = (1 << K_NUM_TOP_BITS); + + 
private class RangeDecoder + { + internal readonly Stream _mStream; + internal uint _range; + internal uint _code; + + public RangeDecoder(Stream stream) + { + _mStream = stream; + _range = 0xFFFFFFFF; + for (int i = 0; i < 5; i++) + { + _code = (_code << 8) | ReadByte(); + } + } + + public byte ReadByte() + { + int bt = _mStream.ReadByte(); + if (bt < 0) + { + throw new EndOfStreamException(); + } + + return (byte)bt; + } + + public void Dispose() + { + _mStream.Dispose(); + } + } + + private class StatusDecoder + { + private const int NUM_MOVE_BITS = 5; + + private const int K_NUM_BIT_MODEL_TOTAL_BITS = 11; + private const uint K_BIT_MODEL_TOTAL = 1u << K_NUM_BIT_MODEL_TOTAL_BITS; + + private uint _prob; + + public StatusDecoder() + { + _prob = K_BIT_MODEL_TOTAL / 2; + } + + private void UpdateModel(uint symbol) + { + /* + Prob -= (Prob + ((symbol - 1) & ((1 << numMoveBits) - 1))) >> numMoveBits; + Prob += (1 - symbol) << (kNumBitModelTotalBits - numMoveBits); + */ + if (symbol == 0) + { + _prob += (K_BIT_MODEL_TOTAL - _prob) >> NUM_MOVE_BITS; + } + else + { + _prob -= (_prob) >> NUM_MOVE_BITS; + } + } + + public uint Decode(RangeDecoder decoder) + { + uint newBound = (decoder._range >> K_NUM_BIT_MODEL_TOTAL_BITS) * _prob; + if (decoder._code < newBound) + { + decoder._range = newBound; + _prob += (K_BIT_MODEL_TOTAL - _prob) >> NUM_MOVE_BITS; + if (decoder._range < K_TOP_VALUE) + { + decoder._code = (decoder._code << 8) | decoder.ReadByte(); + decoder._range <<= 8; + } + return 0; + } + decoder._range -= newBound; + decoder._code -= newBound; + _prob -= _prob >> NUM_MOVE_BITS; + if (decoder._range < K_TOP_VALUE) + { + decoder._code = (decoder._code << 8) | decoder.ReadByte(); + decoder._range <<= 8; + } + return 1; + } + } + + private readonly Stream _mMainStream; + private readonly Stream _mCallStream; + private readonly Stream _mJumpStream; + private readonly RangeDecoder _mRangeDecoder; + private readonly StatusDecoder[] _mStatusDecoder; + private long 
_mWritten; + private readonly IEnumerator _mIter; + private bool _mFinished; + private bool _isDisposed; + + public Bcj2DecoderStream(Stream[] streams, byte[] info, long limit) + { + if (info != null && info.Length > 0) + { + throw new NotSupportedException(); + } + + if (streams.Length != 4) + { + throw new NotSupportedException(); + } + + _mMainStream = streams[0]; + _mCallStream = streams[1]; + _mJumpStream = streams[2]; + _mRangeDecoder = new RangeDecoder(streams[3]); + + _mStatusDecoder = new StatusDecoder[256 + 2]; + for (int i = 0; i < _mStatusDecoder.Length; i++) + { + _mStatusDecoder[i] = new StatusDecoder(); + } + + _mIter = Run().GetEnumerator(); + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + base.Dispose(disposing); + _mMainStream.Dispose(); + _mCallStream.Dispose(); + _mJumpStream.Dispose(); + } + + private static bool IsJcc(byte b0, byte b1) + { + return b0 == 0x0F + && (b1 & 0xF0) == 0x80; + } + + private static bool IsJ(byte b0, byte b1) + { + return (b1 & 0xFE) == 0xE8 + || IsJcc(b0, b1); + } + + private static int GetIndex(byte b0, byte b1) + { + if (b1 == 0xE8) + { + return b0; + } + if (b1 == 0xE9) + { + return 256; + } + return 257; + } + + public override int Read(byte[] buffer, int offset, int count) + { + if (count == 0 || _mFinished) + { + return 0; + } + + for (int i = 0; i < count; i++) + { + if (!_mIter.MoveNext()) + { + _mFinished = true; + return i; + } + + buffer[offset + i] = _mIter.Current; + } + + return count; + } + + public override int ReadByte() + { + if (_mFinished) + { + return -1; + } + + if (!_mIter.MoveNext()) + { + _mFinished = true; + return -1; + } + + return _mIter.Current; + } + + public IEnumerable Run() + { + const uint kBurstSize = (1u << 18); + + byte prevByte = 0; + uint processedBytes = 0; + for (;;) + { + byte b = 0; + uint i; + for (i = 0; i < kBurstSize; i++) + { + int tmp = _mMainStream.ReadByte(); + if (tmp < 0) + { + yield break; + } 
+ + b = (byte)tmp; + _mWritten++; + yield return b; + if (IsJ(prevByte, b)) + { + break; + } + + prevByte = b; + } + + processedBytes += i; + if (i == kBurstSize) + { + continue; + } + + if (_mStatusDecoder[GetIndex(prevByte, b)].Decode(_mRangeDecoder) == 1) + { + Stream s = (b == 0xE8) ? _mCallStream : _mJumpStream; + + uint src = 0; + for (i = 0; i < 4; i++) + { + int b0 = s.ReadByte(); + if (b0 < 0) + { + throw new EndOfStreamException(); + } + + src <<= 8; + src |= (uint)b0; + } + + uint dest = src - (uint)(_mWritten + 4); + _mWritten++; + yield return (byte)dest; + _mWritten++; + yield return (byte)(dest >> 8); + _mWritten++; + yield return (byte)(dest >> 16); + _mWritten++; + yield return (byte)(dest >> 24); + prevByte = (byte)(dest >> 24); + processedBytes += 4; + } + else + { + prevByte = b; + } + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/BitVector.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/BitVector.cs new file mode 100644 index 0000000000..9f267f72bd --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/BitVector.cs @@ -0,0 +1,101 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace SharpCompress.Compressors.LZMA +{ + internal class BitVector + { + private readonly uint[] _mBits; + + public BitVector(int length) + { + Length = length; + _mBits = new uint[(length + 31) >> 5]; + } + + public BitVector(int length, bool initValue) + { + Length = length; + _mBits = new uint[(length + 31) >> 5]; + + if (initValue) + { + for (int i = 0; i < _mBits.Length; i++) + { + _mBits[i] = ~0u; + } + } + } + + public BitVector(List bits) + : this(bits.Count) + { + for (int i = 0; i < bits.Count; i++) + { + if (bits[i]) + { + SetBit(i); + } + } + } + + public bool[] ToArray() + { + bool[] bits = new bool[Length]; + for (int i = 0; i < bits.Length; i++) + { + bits[i] = this[i]; + } + return bits; + } + + public int Length { get; } + + public bool 
this[int index] + { + get + { + if (index < 0 || index >= Length) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + + return (_mBits[index >> 5] & (1u << (index & 31))) != 0; + } + } + + public void SetBit(int index) + { + if (index < 0 || index >= Length) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + + _mBits[index >> 5] |= 1u << (index & 31); + } + + internal bool GetAndSet(int index) + { + if (index < 0 || index >= Length) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + + uint bits = _mBits[index >> 5]; + uint mask = 1u << (index & 31); + _mBits[index >> 5] |= mask; + return (bits & mask) != 0; + } + + public override string ToString() + { + StringBuilder sb = new StringBuilder(Length); + for (int i = 0; i < Length; i++) + { + sb.Append(this[i] ? 'x' : '.'); + } + return sb.ToString(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/CRC.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/CRC.cs new file mode 100644 index 0000000000..2ee0600a67 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/CRC.cs @@ -0,0 +1,89 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.LZMA +{ + internal static class Crc + { + internal const uint INIT_CRC = 0xFFFFFFFF; + internal static readonly uint[] TABLE = new uint[4 * 256]; + + static Crc() + { + const uint kCrcPoly = 0xEDB88320; + + for (uint i = 0; i < 256; i++) + { + uint r = i; + for (int j = 0; j < 8; j++) + { + r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1)); + } + + TABLE[i] = r; + } + + for (uint i = 256; i < TABLE.Length; i++) + { + uint r = TABLE[i - 256]; + TABLE[i] = TABLE[r & 0xFF] ^ (r >> 8); + } + } + + public static uint From(Stream stream, long length) + { + uint crc = INIT_CRC; + byte[] buffer = new byte[Math.Min(length, 4 << 10)]; + while (length > 0) + { + int delta = stream.Read(buffer, 0, (int)Math.Min(length, buffer.Length)); + if (delta == 0) + { + 
throw new EndOfStreamException(); + } + crc = Update(crc, buffer, 0, delta); + length -= delta; + } + return Finish(crc); + } + + public static uint Finish(uint crc) + { + return ~crc; + } + + public static uint Update(uint crc, byte bt) + { + return TABLE[(crc & 0xFF) ^ bt] ^ (crc >> 8); + } + + public static uint Update(uint crc, uint value) + { + crc ^= value; + return TABLE[0x300 + (crc & 0xFF)] + ^ TABLE[0x200 + ((crc >> 8) & 0xFF)] + ^ TABLE[0x100 + ((crc >> 16) & 0xFF)] + ^ TABLE[0x000 + (crc >> 24)]; + } + + public static uint Update(uint crc, ulong value) + { + return Update(Update(crc, (uint)value), (uint)(value >> 32)); + } + + public static uint Update(uint crc, long value) + { + return Update(crc, (ulong)value); + } + + public static uint Update(uint crc, byte[] buffer, int offset, int length) + { + for (int i = 0; i < length; i++) + { + crc = Update(crc, buffer[offset + i]); + } + + return crc; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/DecoderStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/DecoderStream.cs new file mode 100644 index 0000000000..1921ebab4b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/DecoderStream.cs @@ -0,0 +1,182 @@ +using System; +using System.IO; +using SharpCompress.Common.SevenZip; +using SharpCompress.Compressors.LZMA.Utilites; +using SharpCompress.IO; + +namespace SharpCompress.Compressors.LZMA +{ + internal abstract class DecoderStream2 : Stream + { + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new 
NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } + + internal static class DecoderStreamHelper + { + private static int FindCoderIndexForOutStreamIndex(CFolder folderInfo, int outStreamIndex) + { + for (int coderIndex = 0; coderIndex < folderInfo._coders.Count; coderIndex++) + { + var coderInfo = folderInfo._coders[coderIndex]; + outStreamIndex -= coderInfo._numOutStreams; + if (outStreamIndex < 0) + { + return coderIndex; + } + } + + throw new InvalidOperationException("Could not link output stream to coder."); + } + + private static void FindPrimaryOutStreamIndex(CFolder folderInfo, out int primaryCoderIndex, + out int primaryOutStreamIndex) + { + bool foundPrimaryOutStream = false; + primaryCoderIndex = -1; + primaryOutStreamIndex = -1; + + for (int outStreamIndex = 0, coderIndex = 0; + coderIndex < folderInfo._coders.Count; + coderIndex++) + { + for (int coderOutStreamIndex = 0; + coderOutStreamIndex < folderInfo._coders[coderIndex]._numOutStreams; + coderOutStreamIndex++, outStreamIndex++) + { + if (folderInfo.FindBindPairForOutStream(outStreamIndex) < 0) + { + if (foundPrimaryOutStream) + { + throw new NotSupportedException("Multiple output streams."); + } + + foundPrimaryOutStream = true; + primaryCoderIndex = coderIndex; + primaryOutStreamIndex = outStreamIndex; + } + } + } + + if (!foundPrimaryOutStream) + { + throw new NotSupportedException("No output stream."); + } + } + + private static Stream CreateDecoderStream(Stream[] packStreams, long[] packSizes, Stream[] outStreams, + CFolder folderInfo, int coderIndex, IPasswordProvider pass) + { + var coderInfo = folderInfo._coders[coderIndex]; + if (coderInfo._numOutStreams != 1) + { + throw new NotSupportedException("Multiple output streams are not supported."); + } + + int inStreamId = 0; + for (int i = 0; i < coderIndex; 
i++) + { + inStreamId += folderInfo._coders[i]._numInStreams; + } + + int outStreamId = 0; + for (int i = 0; i < coderIndex; i++) + { + outStreamId += folderInfo._coders[i]._numOutStreams; + } + + Stream[] inStreams = new Stream[coderInfo._numInStreams]; + + for (int i = 0; i < inStreams.Length; i++, inStreamId++) + { + int bindPairIndex = folderInfo.FindBindPairForInStream(inStreamId); + if (bindPairIndex >= 0) + { + int pairedOutIndex = folderInfo._bindPairs[bindPairIndex]._outIndex; + + if (outStreams[pairedOutIndex] != null) + { + throw new NotSupportedException("Overlapping stream bindings are not supported."); + } + + int otherCoderIndex = FindCoderIndexForOutStreamIndex(folderInfo, pairedOutIndex); + inStreams[i] = CreateDecoderStream(packStreams, packSizes, outStreams, folderInfo, otherCoderIndex, + pass); + + //inStreamSizes[i] = folderInfo.UnpackSizes[pairedOutIndex]; + + if (outStreams[pairedOutIndex] != null) + { + throw new NotSupportedException("Overlapping stream bindings are not supported."); + } + + outStreams[pairedOutIndex] = inStreams[i]; + } + else + { + int index = folderInfo.FindPackStreamArrayIndex(inStreamId); + if (index < 0) + { + throw new NotSupportedException("Could not find input stream binding."); + } + + inStreams[i] = packStreams[index]; + + //inStreamSizes[i] = packSizes[index]; + } + } + + long unpackSize = folderInfo._unpackSizes[outStreamId]; + return DecoderRegistry.CreateDecoderStream(coderInfo._methodId, inStreams, coderInfo._props, pass, unpackSize); + } + + internal static Stream CreateDecoderStream(Stream inStream, long startPos, long[] packSizes, CFolder folderInfo, + IPasswordProvider pass) + { + if (!folderInfo.CheckStructure()) + { + throw new NotSupportedException("Unsupported stream binding structure."); + } + + Stream[] inStreams = new Stream[folderInfo._packStreams.Count]; + for (int j = 0; j < folderInfo._packStreams.Count; j++) + { + inStreams[j] = new BufferedSubStream(inStream, startPos, packSizes[j]); + 
startPos += packSizes[j]; + } + + Stream[] outStreams = new Stream[folderInfo._unpackSizes.Count]; + + int primaryCoderIndex, primaryOutStreamIndex; + FindPrimaryOutStreamIndex(folderInfo, out primaryCoderIndex, out primaryOutStreamIndex); + return CreateDecoderStream(inStreams, packSizes, outStreams, folderInfo, primaryCoderIndex, pass); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/ICoder.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/ICoder.cs new file mode 100644 index 0000000000..f5049dd74d --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/ICoder.cs @@ -0,0 +1,172 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.LZMA +{ + /// + /// The exception that is thrown when an error in input stream occurs during decoding. + /// + internal class DataErrorException : Exception + { + public DataErrorException() + : base("Data Error") + { + } + } + + /// + /// The exception that is thrown when the value of an argument is outside the allowable range. + /// + internal class InvalidParamException : Exception + { + public InvalidParamException() + : base("Invalid Parameter") + { + } + } + + internal interface ICodeProgress + { + /// + /// Callback progress. + /// + /// + /// input size. -1 if unknown. + /// + /// + /// output size. -1 if unknown. + /// + void SetProgress(Int64 inSize, Int64 outSize); + } + + internal interface ICoder + { + /// + /// Codes streams. + /// + /// + /// input Stream. + /// + /// + /// output Stream. + /// + /// + /// input Size. -1 if unknown. + /// + /// + /// output Size. -1 if unknown. + /// + /// + /// callback progress reference. 
+ /// + void Code(Stream inStream, Stream outStream, + Int64 inSize, Int64 outSize, ICodeProgress progress); + } + + /* + public interface ICoder2 + { + void Code(ISequentialInStream []inStreams, + const UInt64 []inSizes, + ISequentialOutStream []outStreams, + UInt64 []outSizes, + ICodeProgress progress); + }; + */ + + /// + /// Provides the fields that represent properties idenitifiers for compressing. + /// + internal enum CoderPropId + { + /// + /// Specifies default property. + /// + DefaultProp = 0, + + /// + /// Specifies size of dictionary. + /// + DictionarySize, + + /// + /// Specifies size of memory for PPM*. + /// + UsedMemorySize, + + /// + /// Specifies order for PPM methods. + /// + Order, + + /// + /// Specifies Block Size. + /// + BlockSize, + + /// + /// Specifies number of postion state bits for LZMA (0 - x - 4). + /// + PosStateBits, + + /// + /// Specifies number of literal context bits for LZMA (0 - x - 8). + /// + LitContextBits, + + /// + /// Specifies number of literal position bits for LZMA (0 - x - 4). + /// + LitPosBits, + + /// + /// Specifies number of fast bytes for LZ*. + /// + NumFastBytes, + + /// + /// Specifies match finder. LZMA: "BT2", "BT4" or "BT4B". + /// + MatchFinder, + + /// + /// Specifies the number of match finder cyckes. + /// + MatchFinderCycles, + + /// + /// Specifies number of passes. + /// + NumPasses, + + /// + /// Specifies number of algorithm. + /// + Algorithm, + + /// + /// Specifies the number of threads. + /// + NumThreads, + + /// + /// Specifies mode with end marker. 
+ /// + EndMarker + } + + internal interface ISetCoderProperties + { + void SetCoderProperties(CoderPropId[] propIDs, object[] properties); + } + + internal interface IWriteCoderProperties + { + void WriteCoderProperties(Stream outStream); + } + + internal interface ISetDecoderProperties + { + void SetDecoderProperties(byte[] properties); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzBinTree.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzBinTree.cs new file mode 100644 index 0000000000..bfa34b84fa --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzBinTree.cs @@ -0,0 +1,424 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.LZMA.LZ +{ + internal class BinTree : InWindow + { + private UInt32 _cyclicBufferPos; + private UInt32 _cyclicBufferSize; + private UInt32 _matchMaxLen; + + private UInt32[] _son; + private UInt32[] _hash; + + private UInt32 _cutValue = 0xFF; + private UInt32 _hashMask; + private UInt32 _hashSizeSum; + + private bool _hashArray = true; + + private const UInt32 K_HASH2_SIZE = 1 << 10; + private const UInt32 K_HASH3_SIZE = 1 << 16; + private const UInt32 K_BT2_HASH_SIZE = 1 << 16; + private const UInt32 K_START_MAX_LEN = 1; + private const UInt32 K_HASH3_OFFSET = K_HASH2_SIZE; + private const UInt32 K_EMPTY_HASH_VALUE = 0; + private const UInt32 K_MAX_VAL_FOR_NORMALIZE = ((UInt32)1 << 31) - 1; + + private UInt32 _kNumHashDirectBytes; + private UInt32 _kMinMatchCheck = 4; + private UInt32 _kFixHashSize = K_HASH2_SIZE + K_HASH3_SIZE; + + public void SetType(int numHashBytes) + { + _hashArray = (numHashBytes > 2); + if (_hashArray) + { + _kNumHashDirectBytes = 0; + _kMinMatchCheck = 4; + _kFixHashSize = K_HASH2_SIZE + K_HASH3_SIZE; + } + else + { + _kNumHashDirectBytes = 2; + _kMinMatchCheck = 2 + 1; + _kFixHashSize = 0; + } + } + + public new void SetStream(Stream stream) + { + base.SetStream(stream); + } + + public new void 
ReleaseStream() + { + base.ReleaseStream(); + } + + public new void Init() + { + base.Init(); + for (UInt32 i = 0; i < _hashSizeSum; i++) + { + _hash[i] = K_EMPTY_HASH_VALUE; + } + _cyclicBufferPos = 0; + ReduceOffsets(-1); + } + + public new void MovePos() + { + if (++_cyclicBufferPos >= _cyclicBufferSize) + { + _cyclicBufferPos = 0; + } + base.MovePos(); + if (_pos == K_MAX_VAL_FOR_NORMALIZE) + { + Normalize(); + } + } + + public new Byte GetIndexByte(Int32 index) + { + return base.GetIndexByte(index); + } + + public new UInt32 GetMatchLen(Int32 index, UInt32 distance, UInt32 limit) + { + return base.GetMatchLen(index, distance, limit); + } + + public new UInt32 GetNumAvailableBytes() + { + return base.GetNumAvailableBytes(); + } + + public void Create(UInt32 historySize, UInt32 keepAddBufferBefore, + UInt32 matchMaxLen, UInt32 keepAddBufferAfter) + { + if (historySize > K_MAX_VAL_FOR_NORMALIZE - 256) + { + throw new Exception(); + } + _cutValue = 16 + (matchMaxLen >> 1); + + UInt32 windowReservSize = (historySize + keepAddBufferBefore + + matchMaxLen + keepAddBufferAfter) / 2 + 256; + + base.Create(historySize + keepAddBufferBefore, matchMaxLen + keepAddBufferAfter, windowReservSize); + + _matchMaxLen = matchMaxLen; + + UInt32 cyclicBufferSize = historySize + 1; + if (_cyclicBufferSize != cyclicBufferSize) + { + _son = new UInt32[(_cyclicBufferSize = cyclicBufferSize) * 2]; + } + + UInt32 hs = K_BT2_HASH_SIZE; + + if (_hashArray) + { + hs = historySize - 1; + hs |= (hs >> 1); + hs |= (hs >> 2); + hs |= (hs >> 4); + hs |= (hs >> 8); + hs >>= 1; + hs |= 0xFFFF; + if (hs > (1 << 24)) + { + hs >>= 1; + } + _hashMask = hs; + hs++; + hs += _kFixHashSize; + } + if (hs != _hashSizeSum) + { + _hash = new UInt32[_hashSizeSum = hs]; + } + } + + public UInt32 GetMatches(UInt32[] distances) + { + UInt32 lenLimit; + if (_pos + _matchMaxLen <= _streamPos) + { + lenLimit = _matchMaxLen; + } + else + { + lenLimit = _streamPos - _pos; + if (lenLimit < _kMinMatchCheck) + { + 
MovePos(); + return 0; + } + } + + UInt32 offset = 0; + UInt32 matchMinPos = (_pos > _cyclicBufferSize) ? (_pos - _cyclicBufferSize) : 0; + UInt32 cur = _bufferOffset + _pos; + UInt32 maxLen = K_START_MAX_LEN; // to avoid items for len < hashSize; + UInt32 hashValue, hash2Value = 0, hash3Value = 0; + + if (_hashArray) + { + UInt32 temp = Crc.TABLE[_bufferBase[cur]] ^ _bufferBase[cur + 1]; + hash2Value = temp & (K_HASH2_SIZE - 1); + temp ^= ((UInt32)(_bufferBase[cur + 2]) << 8); + hash3Value = temp & (K_HASH3_SIZE - 1); + hashValue = (temp ^ (Crc.TABLE[_bufferBase[cur + 3]] << 5)) & _hashMask; + } + else + { + hashValue = _bufferBase[cur] ^ ((UInt32)(_bufferBase[cur + 1]) << 8); + } + + UInt32 curMatch = _hash[_kFixHashSize + hashValue]; + if (_hashArray) + { + UInt32 curMatch2 = _hash[hash2Value]; + UInt32 curMatch3 = _hash[K_HASH3_OFFSET + hash3Value]; + _hash[hash2Value] = _pos; + _hash[K_HASH3_OFFSET + hash3Value] = _pos; + if (curMatch2 > matchMinPos) + { + if (_bufferBase[_bufferOffset + curMatch2] == _bufferBase[cur]) + { + distances[offset++] = maxLen = 2; + distances[offset++] = _pos - curMatch2 - 1; + } + } + if (curMatch3 > matchMinPos) + { + if (_bufferBase[_bufferOffset + curMatch3] == _bufferBase[cur]) + { + if (curMatch3 == curMatch2) + { + offset -= 2; + } + distances[offset++] = maxLen = 3; + distances[offset++] = _pos - curMatch3 - 1; + curMatch2 = curMatch3; + } + } + if (offset != 0 && curMatch2 == curMatch) + { + offset -= 2; + maxLen = K_START_MAX_LEN; + } + } + + _hash[_kFixHashSize + hashValue] = _pos; + + UInt32 ptr0 = (_cyclicBufferPos << 1) + 1; + UInt32 ptr1 = (_cyclicBufferPos << 1); + + UInt32 len0, len1; + len0 = len1 = _kNumHashDirectBytes; + + if (_kNumHashDirectBytes != 0) + { + if (curMatch > matchMinPos) + { + if (_bufferBase[_bufferOffset + curMatch + _kNumHashDirectBytes] != + _bufferBase[cur + _kNumHashDirectBytes]) + { + distances[offset++] = maxLen = _kNumHashDirectBytes; + distances[offset++] = _pos - curMatch - 1; + } + } + 
} + + UInt32 count = _cutValue; + + while (true) + { + if (curMatch <= matchMinPos || count-- == 0) + { + _son[ptr0] = _son[ptr1] = K_EMPTY_HASH_VALUE; + break; + } + UInt32 delta = _pos - curMatch; + UInt32 cyclicPos = ((delta <= _cyclicBufferPos) + ? (_cyclicBufferPos - delta) + : (_cyclicBufferPos - delta + _cyclicBufferSize)) << 1; + + UInt32 pby1 = _bufferOffset + curMatch; + UInt32 len = Math.Min(len0, len1); + if (_bufferBase[pby1 + len] == _bufferBase[cur + len]) + { + while (++len != lenLimit) + { + if (_bufferBase[pby1 + len] != _bufferBase[cur + len]) + { + break; + } + } + if (maxLen < len) + { + distances[offset++] = maxLen = len; + distances[offset++] = delta - 1; + if (len == lenLimit) + { + _son[ptr1] = _son[cyclicPos]; + _son[ptr0] = _son[cyclicPos + 1]; + break; + } + } + } + if (_bufferBase[pby1 + len] < _bufferBase[cur + len]) + { + _son[ptr1] = curMatch; + ptr1 = cyclicPos + 1; + curMatch = _son[ptr1]; + len1 = len; + } + else + { + _son[ptr0] = curMatch; + ptr0 = cyclicPos; + curMatch = _son[ptr0]; + len0 = len; + } + } + MovePos(); + return offset; + } + + public void Skip(UInt32 num) + { + do + { + UInt32 lenLimit; + if (_pos + _matchMaxLen <= _streamPos) + { + lenLimit = _matchMaxLen; + } + else + { + lenLimit = _streamPos - _pos; + if (lenLimit < _kMinMatchCheck) + { + MovePos(); + continue; + } + } + + UInt32 matchMinPos = (_pos > _cyclicBufferSize) ? 
(_pos - _cyclicBufferSize) : 0; + UInt32 cur = _bufferOffset + _pos; + + UInt32 hashValue; + + if (_hashArray) + { + UInt32 temp = Crc.TABLE[_bufferBase[cur]] ^ _bufferBase[cur + 1]; + UInt32 hash2Value = temp & (K_HASH2_SIZE - 1); + _hash[hash2Value] = _pos; + temp ^= ((UInt32)(_bufferBase[cur + 2]) << 8); + UInt32 hash3Value = temp & (K_HASH3_SIZE - 1); + _hash[K_HASH3_OFFSET + hash3Value] = _pos; + hashValue = (temp ^ (Crc.TABLE[_bufferBase[cur + 3]] << 5)) & _hashMask; + } + else + { + hashValue = _bufferBase[cur] ^ ((UInt32)(_bufferBase[cur + 1]) << 8); + } + + UInt32 curMatch = _hash[_kFixHashSize + hashValue]; + _hash[_kFixHashSize + hashValue] = _pos; + + UInt32 ptr0 = (_cyclicBufferPos << 1) + 1; + UInt32 ptr1 = (_cyclicBufferPos << 1); + + UInt32 len0, len1; + len0 = len1 = _kNumHashDirectBytes; + + UInt32 count = _cutValue; + while (true) + { + if (curMatch <= matchMinPos || count-- == 0) + { + _son[ptr0] = _son[ptr1] = K_EMPTY_HASH_VALUE; + break; + } + + UInt32 delta = _pos - curMatch; + UInt32 cyclicPos = ((delta <= _cyclicBufferPos) + ? 
(_cyclicBufferPos - delta) + : (_cyclicBufferPos - delta + _cyclicBufferSize)) << 1; + + UInt32 pby1 = _bufferOffset + curMatch; + UInt32 len = Math.Min(len0, len1); + if (_bufferBase[pby1 + len] == _bufferBase[cur + len]) + { + while (++len != lenLimit) + { + if (_bufferBase[pby1 + len] != _bufferBase[cur + len]) + { + break; + } + } + if (len == lenLimit) + { + _son[ptr1] = _son[cyclicPos]; + _son[ptr0] = _son[cyclicPos + 1]; + break; + } + } + if (_bufferBase[pby1 + len] < _bufferBase[cur + len]) + { + _son[ptr1] = curMatch; + ptr1 = cyclicPos + 1; + curMatch = _son[ptr1]; + len1 = len; + } + else + { + _son[ptr0] = curMatch; + ptr0 = cyclicPos; + curMatch = _son[ptr0]; + len0 = len; + } + } + MovePos(); + } + while (--num != 0); + } + + private void NormalizeLinks(UInt32[] items, UInt32 numItems, UInt32 subValue) + { + for (UInt32 i = 0; i < numItems; i++) + { + UInt32 value = items[i]; + if (value <= subValue) + { + value = K_EMPTY_HASH_VALUE; + } + else + { + value -= subValue; + } + items[i] = value; + } + } + + private void Normalize() + { + UInt32 subValue = _pos - _cyclicBufferSize; + NormalizeLinks(_son, _cyclicBufferSize * 2, subValue); + NormalizeLinks(_hash, _hashSizeSum, subValue); + ReduceOffsets((Int32)subValue); + } + + public void SetCutValue(UInt32 cutValue) + { + _cutValue = cutValue; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzInWindow.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzInWindow.cs new file mode 100644 index 0000000000..ad8c9170ee --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzInWindow.cs @@ -0,0 +1,183 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.LZMA.LZ +{ + internal class InWindow + { + public Byte[] _bufferBase; // pointer to buffer with data + private Stream _stream; + private UInt32 _posLimit; // offset (from _buffer) of first byte when new block reading must be done + private bool 
_streamEndWasReached; // if (true) then _streamPos shows real end of stream + + private UInt32 _pointerToLastSafePosition; + + public UInt32 _bufferOffset; + + public UInt32 _blockSize; // Size of Allocated memory block + public UInt32 _pos; // offset (from _buffer) of curent byte + private UInt32 _keepSizeBefore; // how many BYTEs must be kept in buffer before _pos + private UInt32 _keepSizeAfter; // how many BYTEs must be kept buffer after _pos + public UInt32 _streamPos; // offset (from _buffer) of first not read byte from Stream + + public void MoveBlock() + { + UInt32 offset = _bufferOffset + _pos - _keepSizeBefore; + + // we need one additional byte, since MovePos moves on 1 byte. + if (offset > 0) + { + offset--; + } + + UInt32 numBytes = _bufferOffset + _streamPos - offset; + + // check negative offset ???? + for (UInt32 i = 0; i < numBytes; i++) + { + _bufferBase[i] = _bufferBase[offset + i]; + } + _bufferOffset -= offset; + } + + public virtual void ReadBlock() + { + if (_streamEndWasReached) + { + return; + } + while (true) + { + int size = (int)((0 - _bufferOffset) + _blockSize - _streamPos); + if (size == 0) + { + return; + } + int numReadBytes = _stream != null + ? 
_stream.Read(_bufferBase, (int)(_bufferOffset + _streamPos), size) + : 0; + if (numReadBytes == 0) + { + _posLimit = _streamPos; + UInt32 pointerToPostion = _bufferOffset + _posLimit; + if (pointerToPostion > _pointerToLastSafePosition) + { + _posLimit = _pointerToLastSafePosition - _bufferOffset; + } + + _streamEndWasReached = true; + return; + } + _streamPos += (UInt32)numReadBytes; + if (_streamPos >= _pos + _keepSizeAfter) + { + _posLimit = _streamPos - _keepSizeAfter; + } + } + } + + private void Free() + { + _bufferBase = null; + } + + public void Create(UInt32 keepSizeBefore, UInt32 keepSizeAfter, UInt32 keepSizeReserv) + { + _keepSizeBefore = keepSizeBefore; + _keepSizeAfter = keepSizeAfter; + UInt32 blockSize = keepSizeBefore + keepSizeAfter + keepSizeReserv; + if (_bufferBase == null || _blockSize != blockSize) + { + Free(); + _blockSize = blockSize; + _bufferBase = new Byte[_blockSize]; + } + _pointerToLastSafePosition = _blockSize - keepSizeAfter; + _streamEndWasReached = false; + } + + public void SetStream(Stream stream) + { + _stream = stream; + if (_streamEndWasReached) + { + _streamEndWasReached = false; + if (IsDataStarved) + { + ReadBlock(); + } + } + } + + public void ReleaseStream() + { + _stream = null; + } + + public void Init() + { + _bufferOffset = 0; + _pos = 0; + _streamPos = 0; + _streamEndWasReached = false; + ReadBlock(); + } + + public void MovePos() + { + _pos++; + if (_pos > _posLimit) + { + UInt32 pointerToPostion = _bufferOffset + _pos; + if (pointerToPostion > _pointerToLastSafePosition) + { + MoveBlock(); + } + ReadBlock(); + } + } + + public Byte GetIndexByte(Int32 index) + { + return _bufferBase[_bufferOffset + _pos + index]; + } + + // index + limit have not to exceed _keepSizeAfter; + public UInt32 GetMatchLen(Int32 index, UInt32 distance, UInt32 limit) + { + if (_streamEndWasReached) + { + if ((_pos + index) + limit > _streamPos) + { + limit = _streamPos - (UInt32)(_pos + index); + } + } + distance++; + + // Byte *pby = 
_buffer + (size_t)_pos + index; + UInt32 pby = _bufferOffset + _pos + (UInt32)index; + + UInt32 i; + for (i = 0; i < limit && _bufferBase[pby + i] == _bufferBase[pby + i - distance]; i++) + { + ; + } + return i; + } + + public UInt32 GetNumAvailableBytes() + { + return _streamPos - _pos; + } + + public void ReduceOffsets(Int32 subValue) + { + _bufferOffset += (UInt32)subValue; + _posLimit -= (UInt32)subValue; + _pos -= (UInt32)subValue; + _streamPos -= (UInt32)subValue; + } + + public bool IsDataStarved => _streamPos - _pos < _keepSizeAfter; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzOutWindow.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzOutWindow.cs new file mode 100644 index 0000000000..6736dc1ae2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZ/LzOutWindow.cs @@ -0,0 +1,205 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.LZMA.LZ +{ + internal class OutWindow + { + private byte[] _buffer; + private int _windowSize; + private int _pos; + private int _streamPos; + private int _pendingLen; + private int _pendingDist; + private Stream _stream; + + public long _total; + public long _limit; + + public void Create(int windowSize) + { + if (_windowSize != windowSize) + { + _buffer = new byte[windowSize]; + } + else + { + _buffer[windowSize - 1] = 0; + } + _windowSize = windowSize; + _pos = 0; + _streamPos = 0; + _pendingLen = 0; + _total = 0; + _limit = 0; + } + + public void Reset() + { + Create(_windowSize); + } + + public void Init(Stream stream) + { + ReleaseStream(); + _stream = stream; + } + + public void Train(Stream stream) + { + long len = stream.Length; + int size = (len < _windowSize) ? 
(int)len : _windowSize; + stream.Position = len - size; + _total = 0; + _limit = size; + _pos = _windowSize - size; + CopyStream(stream, size); + if (_pos == _windowSize) + { + _pos = 0; + } + _streamPos = _pos; + } + + public void ReleaseStream() + { + Flush(); + _stream = null; + } + + public void Flush() + { + if (_stream == null) + { + return; + } + int size = _pos - _streamPos; + if (size == 0) + { + return; + } + _stream.Write(_buffer, _streamPos, size); + if (_pos >= _windowSize) + { + _pos = 0; + } + _streamPos = _pos; + } + + public void CopyBlock(int distance, int len) + { + int size = len; + int pos = _pos - distance - 1; + if (pos < 0) + { + pos += _windowSize; + } + for (; size > 0 && _pos < _windowSize && _total < _limit; size--) + { + if (pos >= _windowSize) + { + pos = 0; + } + _buffer[_pos++] = _buffer[pos++]; + _total++; + if (_pos >= _windowSize) + { + Flush(); + } + } + _pendingLen = size; + _pendingDist = distance; + } + + public void PutByte(byte b) + { + _buffer[_pos++] = b; + _total++; + if (_pos >= _windowSize) + { + Flush(); + } + } + + public byte GetByte(int distance) + { + int pos = _pos - distance - 1; + if (pos < 0) + { + pos += _windowSize; + } + return _buffer[pos]; + } + + public int CopyStream(Stream stream, int len) + { + int size = len; + while (size > 0 && _pos < _windowSize && _total < _limit) + { + int curSize = _windowSize - _pos; + if (curSize > _limit - _total) + { + curSize = (int)(_limit - _total); + } + if (curSize > size) + { + curSize = size; + } + int numReadBytes = stream.Read(_buffer, _pos, curSize); + if (numReadBytes == 0) + { + throw new DataErrorException(); + } + size -= numReadBytes; + _pos += numReadBytes; + _total += numReadBytes; + if (_pos >= _windowSize) + { + Flush(); + } + } + return len - size; + } + + public void SetLimit(long size) + { + _limit = _total + size; + } + + public bool HasSpace => _pos < _windowSize && _total < _limit; + + public bool HasPending => _pendingLen > 0; + + public int 
Read(byte[] buffer, int offset, int count) + { + if (_streamPos >= _pos) + { + return 0; + } + + int size = _pos - _streamPos; + if (size > count) + { + size = count; + } + Buffer.BlockCopy(_buffer, _streamPos, buffer, offset, size); + _streamPos += size; + if (_streamPos >= _windowSize) + { + _pos = 0; + _streamPos = 0; + } + return size; + } + + public void CopyPending() + { + if (_pendingLen > 0) + { + CopyBlock(_pendingDist, _pendingLen); + } + } + + public int AvailableBytes => _pos - _streamPos; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZipStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZipStream.cs new file mode 100644 index 0000000000..588386f59c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LZipStream.cs @@ -0,0 +1,203 @@ +using System; +using System.IO; +using SharpCompress.Converters; +using SharpCompress.Crypto; +using SharpCompress.IO; + +namespace SharpCompress.Compressors.LZMA +{ + // TODO: + // - Write as well as read + // - Multi-volume support + // - Use of the data size / member size values at the end of the stream + + /// + /// Stream supporting the LZIP format, as documented at http://www.nongnu.org/lzip/manual/lzip_manual.html + /// + public class LZipStream : Stream + { + private readonly Stream _stream; + private readonly CountingWritableSubStream _countingWritableSubStream; + private bool _disposed; + private bool _finished; + + private long _writeCount; + + public LZipStream(Stream stream, CompressionMode mode) + { + Mode = mode; + + if (mode == CompressionMode.Decompress) + { + int dSize = ValidateAndReadSize(stream); + if (dSize == 0) + { + throw new IOException("Not an LZip stream"); + } + byte[] properties = GetProperties(dSize); + _stream = new LzmaStream(properties, stream); + } + else + { + //default + int dSize = 104 * 1024; + WriteHeaderSize(stream); + + _countingWritableSubStream = new CountingWritableSubStream(stream); + 
_stream = new Crc32Stream(new LzmaStream(new LzmaEncoderProperties(true, dSize), false, _countingWritableSubStream)); + } + } + + public void Finish() + { + if (!_finished) + { + if (Mode == CompressionMode.Compress) + { + var crc32Stream = (Crc32Stream)_stream; + crc32Stream.WrappedStream.Dispose(); + crc32Stream.Dispose(); + var compressedCount = _countingWritableSubStream.Count; + + var bytes = DataConverter.LittleEndian.GetBytes(crc32Stream.Crc); + _countingWritableSubStream.Write(bytes, 0, bytes.Length); + + bytes = DataConverter.LittleEndian.GetBytes(_writeCount); + _countingWritableSubStream.Write(bytes, 0, bytes.Length); + + //total with headers + bytes = DataConverter.LittleEndian.GetBytes(compressedCount + 6 + 20); + _countingWritableSubStream.Write(bytes, 0, bytes.Length); + } + _finished = true; + } + } + + #region Stream methods + + protected override void Dispose(bool disposing) + { + if (_disposed) + { + return; + } + _disposed = true; + if (disposing) + { + Finish(); + _stream.Dispose(); + } + } + + public CompressionMode Mode { get; } + + public override bool CanRead => Mode == CompressionMode.Decompress; + + public override bool CanSeek => false; + + public override bool CanWrite => Mode == CompressionMode.Compress; + + public override void Flush() + { + _stream.Flush(); + } + + // TODO: Both Length and Position are sometimes feasible, but would require + // reading the output length when we initialize. 
+ public override long Length => throw new NotImplementedException(); + + public override long Position { get => throw new NotImplementedException(); set => throw new NotImplementedException(); } + + public override int Read(byte[] buffer, int offset, int count) => _stream.Read(buffer, offset, count); + + public override int ReadByte() => _stream.ReadByte(); + + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + + public override void SetLength(long value) => throw new NotImplementedException(); + + public override void Write(byte[] buffer, int offset, int count) + { + _stream.Write(buffer, offset, count); + _writeCount += count; + } + + public override void WriteByte(byte value) + { + _stream.WriteByte(value); + ++_writeCount; + } + + #endregion + + /// + /// Determines if the given stream is positioned at the start of a v1 LZip + /// file, as indicated by the ASCII characters "LZIP" and a version byte + /// of 1, followed by at least one byte. + /// + /// The stream to read from. Must not be null. + /// true if the given stream is an LZip file, false otherwise. + public static bool IsLZipFile(Stream stream) => ValidateAndReadSize(stream) != 0; + + /// + /// Reads the 6-byte header of the stream, and returns 0 if either the header + /// couldn't be read or it isn't a validate LZIP header, or the dictionary + /// size if it *is* a valid LZIP file. + /// + public static int ValidateAndReadSize(Stream stream) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + // Read the header + byte[] header = new byte[6]; + int n = stream.Read(header, 0, header.Length); + + // TODO: Handle reading only part of the header? 
+ + if (n != 6) + { + return 0; + } + + if (header[0] != 'L' || header[1] != 'Z' || header[2] != 'I' || header[3] != 'P' || header[4] != 1 /* version 1 */) + { + return 0; + } + int basePower = header[5] & 0x1F; + int subtractionNumerator = (header[5] & 0xE0) >> 5; + return (1 << basePower) - subtractionNumerator * (1 << (basePower - 4)); + } + + public static void WriteHeaderSize(Stream stream) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + // hard coding the dictionary size encoding + byte[] header = new byte[6] {(byte)'L', (byte)'Z', (byte)'I', (byte)'P', 1, 113}; + stream.Write(header, 0, 6); + } + + /// + /// Creates a byte array to communicate the parameters and dictionary size to LzmaStream. + /// + private static byte[] GetProperties(int dictionarySize) => + new byte[] + { + // Parameters as per http://www.nongnu.org/lzip/manual/lzip_manual.html#Stream-format + // but encoded as a single byte in the format LzmaStream expects. + // literal_context_bits = 3 + // literal_pos_state_bits = 0 + // pos_state_bits = 2 + 93, + // Dictionary size as 4-byte little-endian value + (byte)(dictionarySize & 0xff), + (byte)((dictionarySize >> 8) & 0xff), + (byte)((dictionarySize >> 16) & 0xff), + (byte)((dictionarySize >> 24) & 0xff) + }; + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Log.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Log.cs new file mode 100644 index 0000000000..f3b4eab58d --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Log.cs @@ -0,0 +1,94 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; + +namespace SharpCompress.Compressors.LZMA +{ + internal static class Log + { + private static readonly Stack INDENT = new Stack(); + private static bool NEEDS_INDENT = true; + + static Log() + { + INDENT.Push(""); + } + + public static void PushIndent(string indent = " ") + { + INDENT.Push(INDENT.Peek() + indent); + } + + public static 
void PopIndent() + { + if (INDENT.Count == 1) + { + throw new InvalidOperationException(); + } + + INDENT.Pop(); + } + + private static void EnsureIndent() + { + if (NEEDS_INDENT) + { + NEEDS_INDENT = false; +#if !NO_FILE + Debug.Write(INDENT.Peek()); +#endif + } + } + + public static void Write(object value) + { + EnsureIndent(); +#if !NO_FILE + Debug.Write(value); +#endif + } + + public static void Write(string text) + { + EnsureIndent(); +#if !NO_FILE + Debug.Write(text); +#endif + } + + public static void Write(string format, params object[] args) + { + EnsureIndent(); +#if !NO_FILE + Debug.Write(string.Format(format, args)); +#endif + } + + public static void WriteLine() + { + Debug.WriteLine(""); + NEEDS_INDENT = true; + } + + public static void WriteLine(object value) + { + EnsureIndent(); + Debug.WriteLine(value); + NEEDS_INDENT = true; + } + + public static void WriteLine(string text) + { + EnsureIndent(); + Debug.WriteLine(text); + NEEDS_INDENT = true; + } + + public static void WriteLine(string format, params object[] args) + { + EnsureIndent(); + Debug.WriteLine(string.Format(format, args)); + NEEDS_INDENT = true; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaBase.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaBase.cs new file mode 100644 index 0000000000..6e2b8a5ddc --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaBase.cs @@ -0,0 +1,109 @@ +namespace SharpCompress.Compressors.LZMA +{ + internal abstract class Base + { + public const uint K_NUM_REP_DISTANCES = 4; + public const uint K_NUM_STATES = 12; + + // static byte []kLiteralNextStates = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5}; + // static byte []kMatchNextStates = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10}; + // static byte []kRepNextStates = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11}; + // static byte []kShortRepNextStates = {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11}; + + public struct State + { + 
public uint _index; + + public void Init() + { + _index = 0; + } + + public void UpdateChar() + { + if (_index < 4) + { + _index = 0; + } + else if (_index < 10) + { + _index -= 3; + } + else + { + _index -= 6; + } + } + + public void UpdateMatch() + { + _index = (uint)(_index < 7 ? 7 : 10); + } + + public void UpdateRep() + { + _index = (uint)(_index < 7 ? 8 : 11); + } + + public void UpdateShortRep() + { + _index = (uint)(_index < 7 ? 9 : 11); + } + + public bool IsCharState() + { + return _index < 7; + } + } + + public const int K_NUM_POS_SLOT_BITS = 6; + public const int K_DIC_LOG_SIZE_MIN = 0; + + // public const int kDicLogSizeMax = 30; + // public const uint kDistTableSizeMax = kDicLogSizeMax * 2; + + public const int K_NUM_LEN_TO_POS_STATES_BITS = 2; // it's for speed optimization + public const uint K_NUM_LEN_TO_POS_STATES = 1 << K_NUM_LEN_TO_POS_STATES_BITS; + + public const uint K_MATCH_MIN_LEN = 2; + + public static uint GetLenToPosState(uint len) + { + len -= K_MATCH_MIN_LEN; + if (len < K_NUM_LEN_TO_POS_STATES) + { + return len; + } + return K_NUM_LEN_TO_POS_STATES - 1; + } + + public const int K_NUM_ALIGN_BITS = 4; + public const uint K_ALIGN_TABLE_SIZE = 1 << K_NUM_ALIGN_BITS; + public const uint K_ALIGN_MASK = (K_ALIGN_TABLE_SIZE - 1); + + public const uint K_START_POS_MODEL_INDEX = 4; + public const uint K_END_POS_MODEL_INDEX = 14; + public const uint K_NUM_POS_MODELS = K_END_POS_MODEL_INDEX - K_START_POS_MODEL_INDEX; + + public const uint K_NUM_FULL_DISTANCES = 1 << ((int)K_END_POS_MODEL_INDEX / 2); + + public const uint K_NUM_LIT_POS_STATES_BITS_ENCODING_MAX = 4; + public const uint K_NUM_LIT_CONTEXT_BITS_MAX = 8; + + public const int K_NUM_POS_STATES_BITS_MAX = 4; + public const uint K_NUM_POS_STATES_MAX = (1 << K_NUM_POS_STATES_BITS_MAX); + public const int K_NUM_POS_STATES_BITS_ENCODING_MAX = 4; + public const uint K_NUM_POS_STATES_ENCODING_MAX = (1 << K_NUM_POS_STATES_BITS_ENCODING_MAX); + + public const int K_NUM_LOW_LEN_BITS = 3; + public 
const int K_NUM_MID_LEN_BITS = 3; + public const int K_NUM_HIGH_LEN_BITS = 8; + public const uint K_NUM_LOW_LEN_SYMBOLS = 1 << K_NUM_LOW_LEN_BITS; + public const uint K_NUM_MID_LEN_SYMBOLS = 1 << K_NUM_MID_LEN_BITS; + + public const uint K_NUM_LEN_SYMBOLS = K_NUM_LOW_LEN_SYMBOLS + K_NUM_MID_LEN_SYMBOLS + + (1 << K_NUM_HIGH_LEN_BITS); + + public const uint K_MATCH_MAX_LEN = K_MATCH_MIN_LEN + K_NUM_LEN_SYMBOLS - 1; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaDecoder.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaDecoder.cs new file mode 100644 index 0000000000..896e92cf5b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaDecoder.cs @@ -0,0 +1,481 @@ +using System; +using System.IO; +using SharpCompress.Compressors.LZMA.LZ; +using SharpCompress.Compressors.LZMA.RangeCoder; + +namespace SharpCompress.Compressors.LZMA +{ + internal class Decoder : ICoder, ISetDecoderProperties // ,System.IO.Stream + { + private class LenDecoder + { + private BitDecoder _choice = new BitDecoder(); + private BitDecoder _choice2 = new BitDecoder(); + private readonly BitTreeDecoder[] _lowCoder = new BitTreeDecoder[Base.K_NUM_POS_STATES_MAX]; + private readonly BitTreeDecoder[] _midCoder = new BitTreeDecoder[Base.K_NUM_POS_STATES_MAX]; + private BitTreeDecoder _highCoder = new BitTreeDecoder(Base.K_NUM_HIGH_LEN_BITS); + private uint _numPosStates; + + public void Create(uint numPosStates) + { + for (uint posState = _numPosStates; posState < numPosStates; posState++) + { + _lowCoder[posState] = new BitTreeDecoder(Base.K_NUM_LOW_LEN_BITS); + _midCoder[posState] = new BitTreeDecoder(Base.K_NUM_MID_LEN_BITS); + } + _numPosStates = numPosStates; + } + + public void Init() + { + _choice.Init(); + for (uint posState = 0; posState < _numPosStates; posState++) + { + _lowCoder[posState].Init(); + _midCoder[posState].Init(); + } + _choice2.Init(); + _highCoder.Init(); + } + + public uint 
Decode(RangeCoder.Decoder rangeDecoder, uint posState) + { + if (_choice.Decode(rangeDecoder) == 0) + { + return _lowCoder[posState].Decode(rangeDecoder); + } + uint symbol = Base.K_NUM_LOW_LEN_SYMBOLS; + if (_choice2.Decode(rangeDecoder) == 0) + { + symbol += _midCoder[posState].Decode(rangeDecoder); + } + else + { + symbol += Base.K_NUM_MID_LEN_SYMBOLS; + symbol += _highCoder.Decode(rangeDecoder); + } + return symbol; + } + } + + private class LiteralDecoder + { + private struct Decoder2 + { + private BitDecoder[] _decoders; + + public void Create() + { + _decoders = new BitDecoder[0x300]; + } + + public void Init() + { + for (int i = 0; i < 0x300; i++) + { + _decoders[i].Init(); + } + } + + public byte DecodeNormal(RangeCoder.Decoder rangeDecoder) + { + uint symbol = 1; + do + { + symbol = (symbol << 1) | _decoders[symbol].Decode(rangeDecoder); + } + while (symbol < 0x100); + return (byte)symbol; + } + + public byte DecodeWithMatchByte(RangeCoder.Decoder rangeDecoder, byte matchByte) + { + uint symbol = 1; + do + { + uint matchBit = (uint)(matchByte >> 7) & 1; + matchByte <<= 1; + uint bit = _decoders[((1 + matchBit) << 8) + symbol].Decode(rangeDecoder); + symbol = (symbol << 1) | bit; + if (matchBit != bit) + { + while (symbol < 0x100) + { + symbol = (symbol << 1) | _decoders[symbol].Decode(rangeDecoder); + } + break; + } + } + while (symbol < 0x100); + return (byte)symbol; + } + } + + private Decoder2[] _coders; + private int _numPrevBits; + private int _numPosBits; + private uint _posMask; + + public void Create(int numPosBits, int numPrevBits) + { + if (_coders != null && _numPrevBits == numPrevBits && + _numPosBits == numPosBits) + { + return; + } + _numPosBits = numPosBits; + _posMask = ((uint)1 << numPosBits) - 1; + _numPrevBits = numPrevBits; + uint numStates = (uint)1 << (_numPrevBits + _numPosBits); + _coders = new Decoder2[numStates]; + for (uint i = 0; i < numStates; i++) + { + _coders[i].Create(); + } + } + + public void Init() + { + uint numStates 
= (uint)1 << (_numPrevBits + _numPosBits); + for (uint i = 0; i < numStates; i++) + { + _coders[i].Init(); + } + } + + private uint GetState(uint pos, byte prevByte) + { + return ((pos & _posMask) << _numPrevBits) + (uint)(prevByte >> (8 - _numPrevBits)); + } + + public byte DecodeNormal(RangeCoder.Decoder rangeDecoder, uint pos, byte prevByte) + { + return _coders[GetState(pos, prevByte)].DecodeNormal(rangeDecoder); + } + + public byte DecodeWithMatchByte(RangeCoder.Decoder rangeDecoder, uint pos, byte prevByte, byte matchByte) + { + return _coders[GetState(pos, prevByte)].DecodeWithMatchByte(rangeDecoder, matchByte); + } + } + + private OutWindow _outWindow; + + private readonly BitDecoder[] _isMatchDecoders = new BitDecoder[Base.K_NUM_STATES << Base.K_NUM_POS_STATES_BITS_MAX]; + private readonly BitDecoder[] _isRepDecoders = new BitDecoder[Base.K_NUM_STATES]; + private readonly BitDecoder[] _isRepG0Decoders = new BitDecoder[Base.K_NUM_STATES]; + private readonly BitDecoder[] _isRepG1Decoders = new BitDecoder[Base.K_NUM_STATES]; + private readonly BitDecoder[] _isRepG2Decoders = new BitDecoder[Base.K_NUM_STATES]; + private readonly BitDecoder[] _isRep0LongDecoders = new BitDecoder[Base.K_NUM_STATES << Base.K_NUM_POS_STATES_BITS_MAX]; + + private readonly BitTreeDecoder[] _posSlotDecoder = new BitTreeDecoder[Base.K_NUM_LEN_TO_POS_STATES]; + private readonly BitDecoder[] _posDecoders = new BitDecoder[Base.K_NUM_FULL_DISTANCES - Base.K_END_POS_MODEL_INDEX]; + + private BitTreeDecoder _posAlignDecoder = new BitTreeDecoder(Base.K_NUM_ALIGN_BITS); + + private readonly LenDecoder _lenDecoder = new LenDecoder(); + private readonly LenDecoder _repLenDecoder = new LenDecoder(); + + private readonly LiteralDecoder _literalDecoder = new LiteralDecoder(); + + private int _dictionarySize; + + private uint _posStateMask; + + private Base.State _state = new Base.State(); + private uint _rep0, _rep1, _rep2, _rep3; + + public Decoder() + { + _dictionarySize = -1; + for (int i = 0; 
i < Base.K_NUM_LEN_TO_POS_STATES; i++) + { + _posSlotDecoder[i] = new BitTreeDecoder(Base.K_NUM_POS_SLOT_BITS); + } + } + + private void CreateDictionary() + { + if (_dictionarySize < 0) + { + throw new InvalidParamException(); + } + _outWindow = new OutWindow(); + int blockSize = Math.Max(_dictionarySize, (1 << 12)); + _outWindow.Create(blockSize); + } + + private void SetLiteralProperties(int lp, int lc) + { + if (lp > 8) + { + throw new InvalidParamException(); + } + if (lc > 8) + { + throw new InvalidParamException(); + } + _literalDecoder.Create(lp, lc); + } + + private void SetPosBitsProperties(int pb) + { + if (pb > Base.K_NUM_POS_STATES_BITS_MAX) + { + throw new InvalidParamException(); + } + uint numPosStates = (uint)1 << pb; + _lenDecoder.Create(numPosStates); + _repLenDecoder.Create(numPosStates); + _posStateMask = numPosStates - 1; + } + + private void Init() + { + uint i; + for (i = 0; i < Base.K_NUM_STATES; i++) + { + for (uint j = 0; j <= _posStateMask; j++) + { + uint index = (i << Base.K_NUM_POS_STATES_BITS_MAX) + j; + _isMatchDecoders[index].Init(); + _isRep0LongDecoders[index].Init(); + } + _isRepDecoders[i].Init(); + _isRepG0Decoders[i].Init(); + _isRepG1Decoders[i].Init(); + _isRepG2Decoders[i].Init(); + } + + _literalDecoder.Init(); + for (i = 0; i < Base.K_NUM_LEN_TO_POS_STATES; i++) + { + _posSlotDecoder[i].Init(); + } + + // _PosSpecDecoder.Init(); + for (i = 0; i < Base.K_NUM_FULL_DISTANCES - Base.K_END_POS_MODEL_INDEX; i++) + { + _posDecoders[i].Init(); + } + + _lenDecoder.Init(); + _repLenDecoder.Init(); + _posAlignDecoder.Init(); + + _state.Init(); + _rep0 = 0; + _rep1 = 0; + _rep2 = 0; + _rep3 = 0; + } + + public void Code(Stream inStream, Stream outStream, + Int64 inSize, Int64 outSize, ICodeProgress progress) + { + if (_outWindow == null) + { + CreateDictionary(); + } + _outWindow.Init(outStream); + if (outSize > 0) + { + _outWindow.SetLimit(outSize); + } + else + { + _outWindow.SetLimit(Int64.MaxValue - _outWindow._total); + } + + 
RangeCoder.Decoder rangeDecoder = new RangeCoder.Decoder(); + rangeDecoder.Init(inStream); + + Code(_dictionarySize, _outWindow, rangeDecoder); + + _outWindow.ReleaseStream(); + rangeDecoder.ReleaseStream(); + + if (!rangeDecoder.IsFinished || (inSize > 0 && rangeDecoder._total != inSize)) + { + throw new DataErrorException(); + } + if (_outWindow.HasPending) + { + throw new DataErrorException(); + } + _outWindow = null; + } + + internal bool Code(int dictionarySize, OutWindow outWindow, RangeCoder.Decoder rangeDecoder) + { + int dictionarySizeCheck = Math.Max(dictionarySize, 1); + + outWindow.CopyPending(); + + while (outWindow.HasSpace) + { + uint posState = (uint)outWindow._total & _posStateMask; + if (_isMatchDecoders[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].Decode(rangeDecoder) == 0) + { + byte b; + byte prevByte = outWindow.GetByte(0); + if (!_state.IsCharState()) + { + b = _literalDecoder.DecodeWithMatchByte(rangeDecoder, + (uint)outWindow._total, prevByte, + outWindow.GetByte((int)_rep0)); + } + else + { + b = _literalDecoder.DecodeNormal(rangeDecoder, (uint)outWindow._total, prevByte); + } + outWindow.PutByte(b); + _state.UpdateChar(); + } + else + { + uint len; + if (_isRepDecoders[_state._index].Decode(rangeDecoder) == 1) + { + if (_isRepG0Decoders[_state._index].Decode(rangeDecoder) == 0) + { + if ( + _isRep0LongDecoders[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].Decode( + rangeDecoder) == 0) + { + _state.UpdateShortRep(); + outWindow.PutByte(outWindow.GetByte((int)_rep0)); + continue; + } + } + else + { + UInt32 distance; + if (_isRepG1Decoders[_state._index].Decode(rangeDecoder) == 0) + { + distance = _rep1; + } + else + { + if (_isRepG2Decoders[_state._index].Decode(rangeDecoder) == 0) + { + distance = _rep2; + } + else + { + distance = _rep3; + _rep3 = _rep2; + } + _rep2 = _rep1; + } + _rep1 = _rep0; + _rep0 = distance; + } + len = _repLenDecoder.Decode(rangeDecoder, posState) + Base.K_MATCH_MIN_LEN; + 
_state.UpdateRep(); + } + else + { + _rep3 = _rep2; + _rep2 = _rep1; + _rep1 = _rep0; + len = Base.K_MATCH_MIN_LEN + _lenDecoder.Decode(rangeDecoder, posState); + _state.UpdateMatch(); + uint posSlot = _posSlotDecoder[Base.GetLenToPosState(len)].Decode(rangeDecoder); + if (posSlot >= Base.K_START_POS_MODEL_INDEX) + { + int numDirectBits = (int)((posSlot >> 1) - 1); + _rep0 = ((2 | (posSlot & 1)) << numDirectBits); + if (posSlot < Base.K_END_POS_MODEL_INDEX) + { + _rep0 += BitTreeDecoder.ReverseDecode(_posDecoders, + _rep0 - posSlot - 1, rangeDecoder, numDirectBits); + } + else + { + _rep0 += (rangeDecoder.DecodeDirectBits( + numDirectBits - Base.K_NUM_ALIGN_BITS) << Base.K_NUM_ALIGN_BITS); + _rep0 += _posAlignDecoder.ReverseDecode(rangeDecoder); + } + } + else + { + _rep0 = posSlot; + } + } + if (_rep0 >= outWindow._total || _rep0 >= dictionarySizeCheck) + { + if (_rep0 == 0xFFFFFFFF) + { + return true; + } + throw new DataErrorException(); + } + outWindow.CopyBlock((int)_rep0, (int)len); + } + } + return false; + } + + public void SetDecoderProperties(byte[] properties) + { + if (properties.Length < 1) + { + throw new InvalidParamException(); + } + int lc = properties[0] % 9; + int remainder = properties[0] / 9; + int lp = remainder % 5; + int pb = remainder / 5; + if (pb > Base.K_NUM_POS_STATES_BITS_MAX) + { + throw new InvalidParamException(); + } + SetLiteralProperties(lp, lc); + SetPosBitsProperties(pb); + Init(); + if (properties.Length >= 5) + { + _dictionarySize = 0; + for (int i = 0; i < 4; i++) + { + _dictionarySize += properties[1 + i] << (i * 8); + } + } + } + + public void Train(Stream stream) + { + if (_outWindow == null) + { + CreateDictionary(); + } + _outWindow.Train(stream); + } + + /* + public override bool CanRead { get { return true; }} + public override bool CanWrite { get { return true; }} + public override bool CanSeek { get { return true; }} + public override long Length { get { return 0; }} + public override long Position + { + get { 
return 0; } + set { } + } + public override void Flush() { } + public override int Read(byte[] buffer, int offset, int count) + { + return 0; + } + public override void Write(byte[] buffer, int offset, int count) + { + } + public override long Seek(long offset, System.IO.SeekOrigin origin) + { + return 0; + } + public override void SetLength(long value) {} + */ + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoder.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoder.cs new file mode 100644 index 0000000000..be604d3280 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoder.cs @@ -0,0 +1,1797 @@ +using System; +using System.IO; +using SharpCompress.Compressors.LZMA.LZ; +using SharpCompress.Compressors.LZMA.RangeCoder; + +namespace SharpCompress.Compressors.LZMA +{ + internal class Encoder : ICoder, ISetCoderProperties, IWriteCoderProperties + { + private enum EMatchFinderType + { + Bt2, + Bt4 + } + + private const UInt32 K_IFINITY_PRICE = 0xFFFFFFF; + + private static readonly Byte[] G_FAST_POS = new Byte[1 << 11]; + + static Encoder() + { + const Byte kFastSlots = 22; + int c = 2; + G_FAST_POS[0] = 0; + G_FAST_POS[1] = 1; + for (Byte slotFast = 2; slotFast < kFastSlots; slotFast++) + { + UInt32 k = ((UInt32)1 << ((slotFast >> 1) - 1)); + for (UInt32 j = 0; j < k; j++, c++) + { + G_FAST_POS[c] = slotFast; + } + } + } + + private static UInt32 GetPosSlot(UInt32 pos) + { + if (pos < (1 << 11)) + { + return G_FAST_POS[pos]; + } + if (pos < (1 << 21)) + { + return (UInt32)(G_FAST_POS[pos >> 10] + 20); + } + return (UInt32)(G_FAST_POS[pos >> 20] + 40); + } + + private static UInt32 GetPosSlot2(UInt32 pos) + { + if (pos < (1 << 17)) + { + return (UInt32)(G_FAST_POS[pos >> 6] + 12); + } + if (pos < (1 << 27)) + { + return (UInt32)(G_FAST_POS[pos >> 16] + 32); + } + return (UInt32)(G_FAST_POS[pos >> 26] + 52); + } + + private Base.State _state = new Base.State(); + 
private Byte _previousByte; + private readonly UInt32[] _repDistances = new UInt32[Base.K_NUM_REP_DISTANCES]; + + private void BaseInit() + { + _state.Init(); + _previousByte = 0; + for (UInt32 i = 0; i < Base.K_NUM_REP_DISTANCES; i++) + { + _repDistances[i] = 0; + } + } + + private const int K_DEFAULT_DICTIONARY_LOG_SIZE = 22; + private const UInt32 K_NUM_FAST_BYTES_DEFAULT = 0x20; + + private class LiteralEncoder + { + public struct Encoder2 + { + private BitEncoder[] _encoders; + + public void Create() + { + _encoders = new BitEncoder[0x300]; + } + + public void Init() + { + for (int i = 0; i < 0x300; i++) + { + _encoders[i].Init(); + } + } + + public void Encode(RangeCoder.Encoder rangeEncoder, byte symbol) + { + uint context = 1; + for (int i = 7; i >= 0; i--) + { + uint bit = (uint)((symbol >> i) & 1); + _encoders[context].Encode(rangeEncoder, bit); + context = (context << 1) | bit; + } + } + + public void EncodeMatched(RangeCoder.Encoder rangeEncoder, byte matchByte, byte symbol) + { + uint context = 1; + bool same = true; + for (int i = 7; i >= 0; i--) + { + uint bit = (uint)((symbol >> i) & 1); + uint state = context; + if (same) + { + uint matchBit = (uint)((matchByte >> i) & 1); + state += ((1 + matchBit) << 8); + same = (matchBit == bit); + } + _encoders[state].Encode(rangeEncoder, bit); + context = (context << 1) | bit; + } + } + + public uint GetPrice(bool matchMode, byte matchByte, byte symbol) + { + uint price = 0; + uint context = 1; + int i = 7; + if (matchMode) + { + for (; i >= 0; i--) + { + uint matchBit = (uint)(matchByte >> i) & 1; + uint bit = (uint)(symbol >> i) & 1; + price += _encoders[((1 + matchBit) << 8) + context].GetPrice(bit); + context = (context << 1) | bit; + if (matchBit != bit) + { + i--; + break; + } + } + } + for (; i >= 0; i--) + { + uint bit = (uint)(symbol >> i) & 1; + price += _encoders[context].GetPrice(bit); + context = (context << 1) | bit; + } + return price; + } + } + + private Encoder2[] _coders; + private int 
_numPrevBits; + private int _numPosBits; + private uint _posMask; + + public void Create(int numPosBits, int numPrevBits) + { + if (_coders != null && _numPrevBits == numPrevBits && _numPosBits == numPosBits) + { + return; + } + _numPosBits = numPosBits; + _posMask = ((uint)1 << numPosBits) - 1; + _numPrevBits = numPrevBits; + uint numStates = (uint)1 << (_numPrevBits + _numPosBits); + _coders = new Encoder2[numStates]; + for (uint i = 0; i < numStates; i++) + { + _coders[i].Create(); + } + } + + public void Init() + { + uint numStates = (uint)1 << (_numPrevBits + _numPosBits); + for (uint i = 0; i < numStates; i++) + { + _coders[i].Init(); + } + } + + public Encoder2 GetSubCoder(UInt32 pos, Byte prevByte) + { + return _coders[((pos & _posMask) << _numPrevBits) + (uint)(prevByte >> (8 - _numPrevBits))]; + } + } + + private class LenEncoder + { + private BitEncoder _choice = new BitEncoder(); + private BitEncoder _choice2 = new BitEncoder(); + private readonly BitTreeEncoder[] _lowCoder = new BitTreeEncoder[Base.K_NUM_POS_STATES_ENCODING_MAX]; + private readonly BitTreeEncoder[] _midCoder = new BitTreeEncoder[Base.K_NUM_POS_STATES_ENCODING_MAX]; + private BitTreeEncoder _highCoder = new BitTreeEncoder(Base.K_NUM_HIGH_LEN_BITS); + + public LenEncoder() + { + for (UInt32 posState = 0; posState < Base.K_NUM_POS_STATES_ENCODING_MAX; posState++) + { + _lowCoder[posState] = new BitTreeEncoder(Base.K_NUM_LOW_LEN_BITS); + _midCoder[posState] = new BitTreeEncoder(Base.K_NUM_MID_LEN_BITS); + } + } + + public void Init(UInt32 numPosStates) + { + _choice.Init(); + _choice2.Init(); + for (UInt32 posState = 0; posState < numPosStates; posState++) + { + _lowCoder[posState].Init(); + _midCoder[posState].Init(); + } + _highCoder.Init(); + } + + public void Encode(RangeCoder.Encoder rangeEncoder, UInt32 symbol, UInt32 posState) + { + if (symbol < Base.K_NUM_LOW_LEN_SYMBOLS) + { + _choice.Encode(rangeEncoder, 0); + _lowCoder[posState].Encode(rangeEncoder, symbol); + } + else + { + 
symbol -= Base.K_NUM_LOW_LEN_SYMBOLS; + _choice.Encode(rangeEncoder, 1); + if (symbol < Base.K_NUM_MID_LEN_SYMBOLS) + { + _choice2.Encode(rangeEncoder, 0); + _midCoder[posState].Encode(rangeEncoder, symbol); + } + else + { + _choice2.Encode(rangeEncoder, 1); + _highCoder.Encode(rangeEncoder, symbol - Base.K_NUM_MID_LEN_SYMBOLS); + } + } + } + + public void SetPrices(UInt32 posState, UInt32 numSymbols, UInt32[] prices, UInt32 st) + { + UInt32 a0 = _choice.GetPrice0(); + UInt32 a1 = _choice.GetPrice1(); + UInt32 b0 = a1 + _choice2.GetPrice0(); + UInt32 b1 = a1 + _choice2.GetPrice1(); + UInt32 i = 0; + for (i = 0; i < Base.K_NUM_LOW_LEN_SYMBOLS; i++) + { + if (i >= numSymbols) + { + return; + } + prices[st + i] = a0 + _lowCoder[posState].GetPrice(i); + } + for (; i < Base.K_NUM_LOW_LEN_SYMBOLS + Base.K_NUM_MID_LEN_SYMBOLS; i++) + { + if (i >= numSymbols) + { + return; + } + prices[st + i] = b0 + _midCoder[posState].GetPrice(i - Base.K_NUM_LOW_LEN_SYMBOLS); + } + for (; i < numSymbols; i++) + { + prices[st + i] = b1 + _highCoder.GetPrice(i - Base.K_NUM_LOW_LEN_SYMBOLS - Base.K_NUM_MID_LEN_SYMBOLS); + } + } + } + + private const UInt32 K_NUM_LEN_SPEC_SYMBOLS = Base.K_NUM_LOW_LEN_SYMBOLS + Base.K_NUM_MID_LEN_SYMBOLS; + + private class LenPriceTableEncoder : LenEncoder + { + private readonly UInt32[] _prices = new UInt32[Base.K_NUM_LEN_SYMBOLS << Base.K_NUM_POS_STATES_BITS_ENCODING_MAX]; + private UInt32 _tableSize; + private readonly UInt32[] _counters = new UInt32[Base.K_NUM_POS_STATES_ENCODING_MAX]; + + public void SetTableSize(UInt32 tableSize) + { + _tableSize = tableSize; + } + + public UInt32 GetPrice(UInt32 symbol, UInt32 posState) + { + return _prices[posState * Base.K_NUM_LEN_SYMBOLS + symbol]; + } + + private void UpdateTable(UInt32 posState) + { + SetPrices(posState, _tableSize, _prices, posState * Base.K_NUM_LEN_SYMBOLS); + _counters[posState] = _tableSize; + } + + public void UpdateTables(UInt32 numPosStates) + { + for (UInt32 posState = 0; posState < 
numPosStates; posState++) + { + UpdateTable(posState); + } + } + + public new void Encode(RangeCoder.Encoder rangeEncoder, UInt32 symbol, UInt32 posState) + { + base.Encode(rangeEncoder, symbol, posState); + if (--_counters[posState] == 0) + { + UpdateTable(posState); + } + } + } + + private const UInt32 K_NUM_OPTS = 1 << 12; + + private class Optimal + { + public Base.State _state; + + public bool _prev1IsChar; + public bool _prev2; + + public UInt32 _posPrev2; + public UInt32 _backPrev2; + + public UInt32 _price; + public UInt32 _posPrev; + public UInt32 _backPrev; + + public UInt32 _backs0; + public UInt32 _backs1; + public UInt32 _backs2; + public UInt32 _backs3; + + public void MakeAsChar() + { + _backPrev = 0xFFFFFFFF; + _prev1IsChar = false; + } + + public void MakeAsShortRep() + { + _backPrev = 0; + ; + _prev1IsChar = false; + } + + public bool IsShortRep() + { + return (_backPrev == 0); + } + } + + private readonly Optimal[] _optimum = new Optimal[K_NUM_OPTS]; + private BinTree _matchFinder; + private readonly RangeCoder.Encoder _rangeEncoder = new RangeCoder.Encoder(); + + private readonly BitEncoder[] _isMatch = + new BitEncoder[Base.K_NUM_STATES << Base.K_NUM_POS_STATES_BITS_MAX]; + + private readonly BitEncoder[] _isRep = new BitEncoder[Base.K_NUM_STATES]; + private readonly BitEncoder[] _isRepG0 = new BitEncoder[Base.K_NUM_STATES]; + private readonly BitEncoder[] _isRepG1 = new BitEncoder[Base.K_NUM_STATES]; + private readonly BitEncoder[] _isRepG2 = new BitEncoder[Base.K_NUM_STATES]; + + private readonly BitEncoder[] _isRep0Long = + new BitEncoder[Base.K_NUM_STATES << Base.K_NUM_POS_STATES_BITS_MAX]; + + private readonly BitTreeEncoder[] _posSlotEncoder = new BitTreeEncoder[Base.K_NUM_LEN_TO_POS_STATES]; + + private readonly BitEncoder[] _posEncoders = + new BitEncoder[Base.K_NUM_FULL_DISTANCES - Base.K_END_POS_MODEL_INDEX]; + + private BitTreeEncoder _posAlignEncoder = new BitTreeEncoder(Base.K_NUM_ALIGN_BITS); + + private readonly 
LenPriceTableEncoder _lenEncoder = new LenPriceTableEncoder(); + private readonly LenPriceTableEncoder _repMatchLenEncoder = new LenPriceTableEncoder(); + + private readonly LiteralEncoder _literalEncoder = new LiteralEncoder(); + + private readonly UInt32[] _matchDistances = new UInt32[Base.K_MATCH_MAX_LEN * 2 + 2]; + + private UInt32 _numFastBytes = K_NUM_FAST_BYTES_DEFAULT; + private UInt32 _longestMatchLength; + private UInt32 _numDistancePairs; + + private UInt32 _additionalOffset; + + private UInt32 _optimumEndIndex; + private UInt32 _optimumCurrentIndex; + + private bool _longestMatchWasFound; + + private readonly UInt32[] _posSlotPrices = new UInt32[1 << (Base.K_NUM_POS_SLOT_BITS + Base.K_NUM_LEN_TO_POS_STATES_BITS)]; + private readonly UInt32[] _distancesPrices = new UInt32[Base.K_NUM_FULL_DISTANCES << Base.K_NUM_LEN_TO_POS_STATES_BITS]; + private readonly UInt32[] _alignPrices = new UInt32[Base.K_ALIGN_TABLE_SIZE]; + private UInt32 _alignPriceCount; + + private UInt32 _distTableSize = (K_DEFAULT_DICTIONARY_LOG_SIZE * 2); + + private int _posStateBits = 2; + private UInt32 _posStateMask = (4 - 1); + private int _numLiteralPosStateBits; + private int _numLiteralContextBits = 3; + + private UInt32 _dictionarySize = (1 << K_DEFAULT_DICTIONARY_LOG_SIZE); + private UInt32 _dictionarySizePrev = 0xFFFFFFFF; + private UInt32 _numFastBytesPrev = 0xFFFFFFFF; + + private Int64 _nowPos64; + private bool _finished; + private Stream _inStream; + + private EMatchFinderType _matchFinderType = EMatchFinderType.Bt4; + private bool _writeEndMark; + + private bool _needReleaseMfStream; + private bool _processingMode; + + private void Create() + { + if (_matchFinder == null) + { + BinTree bt = new BinTree(); + int numHashBytes = 4; + if (_matchFinderType == EMatchFinderType.Bt2) + { + numHashBytes = 2; + } + bt.SetType(numHashBytes); + _matchFinder = bt; + } + _literalEncoder.Create(_numLiteralPosStateBits, _numLiteralContextBits); + + if (_dictionarySize == 
_dictionarySizePrev && _numFastBytesPrev == _numFastBytes) + { + return; + } + _matchFinder.Create(_dictionarySize, K_NUM_OPTS, _numFastBytes, Base.K_MATCH_MAX_LEN + 1 + K_NUM_OPTS); + _dictionarySizePrev = _dictionarySize; + _numFastBytesPrev = _numFastBytes; + } + + public Encoder() + { + for (int i = 0; i < K_NUM_OPTS; i++) + { + _optimum[i] = new Optimal(); + } + for (int i = 0; i < Base.K_NUM_LEN_TO_POS_STATES; i++) + { + _posSlotEncoder[i] = new BitTreeEncoder(Base.K_NUM_POS_SLOT_BITS); + } + } + + private void SetWriteEndMarkerMode(bool writeEndMarker) + { + _writeEndMark = writeEndMarker; + } + + private void Init() + { + BaseInit(); + _rangeEncoder.Init(); + + uint i; + for (i = 0; i < Base.K_NUM_STATES; i++) + { + for (uint j = 0; j <= _posStateMask; j++) + { + uint complexState = (i << Base.K_NUM_POS_STATES_BITS_MAX) + j; + _isMatch[complexState].Init(); + _isRep0Long[complexState].Init(); + } + _isRep[i].Init(); + _isRepG0[i].Init(); + _isRepG1[i].Init(); + _isRepG2[i].Init(); + } + _literalEncoder.Init(); + for (i = 0; i < Base.K_NUM_LEN_TO_POS_STATES; i++) + { + _posSlotEncoder[i].Init(); + } + for (i = 0; i < Base.K_NUM_FULL_DISTANCES - Base.K_END_POS_MODEL_INDEX; i++) + { + _posEncoders[i].Init(); + } + + _lenEncoder.Init((UInt32)1 << _posStateBits); + _repMatchLenEncoder.Init((UInt32)1 << _posStateBits); + + _posAlignEncoder.Init(); + + _longestMatchWasFound = false; + _optimumEndIndex = 0; + _optimumCurrentIndex = 0; + _additionalOffset = 0; + } + + private void ReadMatchDistances(out UInt32 lenRes, out UInt32 numDistancePairs) + { + lenRes = 0; + numDistancePairs = _matchFinder.GetMatches(_matchDistances); + if (numDistancePairs > 0) + { + lenRes = _matchDistances[numDistancePairs - 2]; + if (lenRes == _numFastBytes) + { + lenRes += _matchFinder.GetMatchLen((int)lenRes - 1, _matchDistances[numDistancePairs - 1], + Base.K_MATCH_MAX_LEN - lenRes); + } + } + _additionalOffset++; + } + + private void MovePos(UInt32 num) + { + if (num > 0) + { + 
_matchFinder.Skip(num); + _additionalOffset += num; + } + } + + private UInt32 GetRepLen1Price(Base.State state, UInt32 posState) + { + return _isRepG0[state._index].GetPrice0() + + _isRep0Long[(state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].GetPrice0(); + } + + private UInt32 GetPureRepPrice(UInt32 repIndex, Base.State state, UInt32 posState) + { + UInt32 price; + if (repIndex == 0) + { + price = _isRepG0[state._index].GetPrice0(); + price += _isRep0Long[(state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].GetPrice1(); + } + else + { + price = _isRepG0[state._index].GetPrice1(); + if (repIndex == 1) + { + price += _isRepG1[state._index].GetPrice0(); + } + else + { + price += _isRepG1[state._index].GetPrice1(); + price += _isRepG2[state._index].GetPrice(repIndex - 2); + } + } + return price; + } + + private UInt32 GetRepPrice(UInt32 repIndex, UInt32 len, Base.State state, UInt32 posState) + { + UInt32 price = _repMatchLenEncoder.GetPrice(len - Base.K_MATCH_MIN_LEN, posState); + return price + GetPureRepPrice(repIndex, state, posState); + } + + private UInt32 GetPosLenPrice(UInt32 pos, UInt32 len, UInt32 posState) + { + UInt32 price; + UInt32 lenToPosState = Base.GetLenToPosState(len); + if (pos < Base.K_NUM_FULL_DISTANCES) + { + price = _distancesPrices[(lenToPosState * Base.K_NUM_FULL_DISTANCES) + pos]; + } + else + { + price = _posSlotPrices[(lenToPosState << Base.K_NUM_POS_SLOT_BITS) + GetPosSlot2(pos)] + + _alignPrices[pos & Base.K_ALIGN_MASK]; + } + return price + _lenEncoder.GetPrice(len - Base.K_MATCH_MIN_LEN, posState); + } + + private UInt32 Backward(out UInt32 backRes, UInt32 cur) + { + _optimumEndIndex = cur; + UInt32 posMem = _optimum[cur]._posPrev; + UInt32 backMem = _optimum[cur]._backPrev; + do + { + if (_optimum[cur]._prev1IsChar) + { + _optimum[posMem].MakeAsChar(); + _optimum[posMem]._posPrev = posMem - 1; + if (_optimum[cur]._prev2) + { + _optimum[posMem - 1]._prev1IsChar = false; + _optimum[posMem - 1]._posPrev = 
_optimum[cur]._posPrev2; + _optimum[posMem - 1]._backPrev = _optimum[cur]._backPrev2; + } + } + UInt32 posPrev = posMem; + UInt32 backCur = backMem; + + backMem = _optimum[posPrev]._backPrev; + posMem = _optimum[posPrev]._posPrev; + + _optimum[posPrev]._backPrev = backCur; + _optimum[posPrev]._posPrev = cur; + cur = posPrev; + } + while (cur > 0); + backRes = _optimum[0]._backPrev; + _optimumCurrentIndex = _optimum[0]._posPrev; + return _optimumCurrentIndex; + } + + private readonly UInt32[] _reps = new UInt32[Base.K_NUM_REP_DISTANCES]; + private readonly UInt32[] _repLens = new UInt32[Base.K_NUM_REP_DISTANCES]; + + private UInt32 GetOptimum(UInt32 position, out UInt32 backRes) + { + if (_optimumEndIndex != _optimumCurrentIndex) + { + UInt32 lenRes = _optimum[_optimumCurrentIndex]._posPrev - _optimumCurrentIndex; + backRes = _optimum[_optimumCurrentIndex]._backPrev; + _optimumCurrentIndex = _optimum[_optimumCurrentIndex]._posPrev; + return lenRes; + } + _optimumCurrentIndex = _optimumEndIndex = 0; + + UInt32 lenMain, numDistancePairs; + if (!_longestMatchWasFound) + { + ReadMatchDistances(out lenMain, out numDistancePairs); + } + else + { + lenMain = _longestMatchLength; + numDistancePairs = _numDistancePairs; + _longestMatchWasFound = false; + } + + UInt32 numAvailableBytes = _matchFinder.GetNumAvailableBytes() + 1; + if (numAvailableBytes < 2) + { + backRes = 0xFFFFFFFF; + return 1; + } + if (numAvailableBytes > Base.K_MATCH_MAX_LEN) + { + numAvailableBytes = Base.K_MATCH_MAX_LEN; + } + + UInt32 repMaxIndex = 0; + UInt32 i; + for (i = 0; i < Base.K_NUM_REP_DISTANCES; i++) + { + _reps[i] = _repDistances[i]; + _repLens[i] = _matchFinder.GetMatchLen(0 - 1, _reps[i], Base.K_MATCH_MAX_LEN); + if (_repLens[i] > _repLens[repMaxIndex]) + { + repMaxIndex = i; + } + } + if (_repLens[repMaxIndex] >= _numFastBytes) + { + backRes = repMaxIndex; + UInt32 lenRes = _repLens[repMaxIndex]; + MovePos(lenRes - 1); + return lenRes; + } + + if (lenMain >= _numFastBytes) + { + backRes 
= _matchDistances[numDistancePairs - 1] + Base.K_NUM_REP_DISTANCES; + MovePos(lenMain - 1); + return lenMain; + } + + Byte currentByte = _matchFinder.GetIndexByte(0 - 1); + Byte matchByte = _matchFinder.GetIndexByte((Int32)(0 - _repDistances[0] - 1 - 1)); + + if (lenMain < 2 && currentByte != matchByte && _repLens[repMaxIndex] < 2) + { + backRes = 0xFFFFFFFF; + return 1; + } + + _optimum[0]._state = _state; + + UInt32 posState = (position & _posStateMask); + + _optimum[1]._price = _isMatch[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].GetPrice0() + + _literalEncoder.GetSubCoder(position, _previousByte) + .GetPrice(!_state.IsCharState(), matchByte, currentByte); + _optimum[1].MakeAsChar(); + + UInt32 matchPrice = _isMatch[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].GetPrice1(); + UInt32 repMatchPrice = matchPrice + _isRep[_state._index].GetPrice1(); + + if (matchByte == currentByte) + { + UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(_state, posState); + if (shortRepPrice < _optimum[1]._price) + { + _optimum[1]._price = shortRepPrice; + _optimum[1].MakeAsShortRep(); + } + } + + UInt32 lenEnd = ((lenMain >= _repLens[repMaxIndex]) ? 
lenMain : _repLens[repMaxIndex]); + + if (lenEnd < 2) + { + backRes = _optimum[1]._backPrev; + return 1; + } + + _optimum[1]._posPrev = 0; + + _optimum[0]._backs0 = _reps[0]; + _optimum[0]._backs1 = _reps[1]; + _optimum[0]._backs2 = _reps[2]; + _optimum[0]._backs3 = _reps[3]; + + UInt32 len = lenEnd; + do + { + _optimum[len--]._price = K_IFINITY_PRICE; + } + while (len >= 2); + + for (i = 0; i < Base.K_NUM_REP_DISTANCES; i++) + { + UInt32 repLen = _repLens[i]; + if (repLen < 2) + { + continue; + } + UInt32 price = repMatchPrice + GetPureRepPrice(i, _state, posState); + do + { + UInt32 curAndLenPrice = price + _repMatchLenEncoder.GetPrice(repLen - 2, posState); + Optimal optimum = _optimum[repLen]; + if (curAndLenPrice < optimum._price) + { + optimum._price = curAndLenPrice; + optimum._posPrev = 0; + optimum._backPrev = i; + optimum._prev1IsChar = false; + } + } + while (--repLen >= 2); + } + + UInt32 normalMatchPrice = matchPrice + _isRep[_state._index].GetPrice0(); + + len = ((_repLens[0] >= 2) ? 
_repLens[0] + 1 : 2); + if (len <= lenMain) + { + UInt32 offs = 0; + while (len > _matchDistances[offs]) + { + offs += 2; + } + for (;; len++) + { + UInt32 distance = _matchDistances[offs + 1]; + UInt32 curAndLenPrice = normalMatchPrice + GetPosLenPrice(distance, len, posState); + Optimal optimum = _optimum[len]; + if (curAndLenPrice < optimum._price) + { + optimum._price = curAndLenPrice; + optimum._posPrev = 0; + optimum._backPrev = distance + Base.K_NUM_REP_DISTANCES; + optimum._prev1IsChar = false; + } + if (len == _matchDistances[offs]) + { + offs += 2; + if (offs == numDistancePairs) + { + break; + } + } + } + } + + UInt32 cur = 0; + + while (true) + { + cur++; + if (cur == lenEnd) + { + return Backward(out backRes, cur); + } + UInt32 newLen; + ReadMatchDistances(out newLen, out numDistancePairs); + if (newLen >= _numFastBytes) + { + _numDistancePairs = numDistancePairs; + _longestMatchLength = newLen; + _longestMatchWasFound = true; + return Backward(out backRes, cur); + } + position++; + UInt32 posPrev = _optimum[cur]._posPrev; + Base.State state; + if (_optimum[cur]._prev1IsChar) + { + posPrev--; + if (_optimum[cur]._prev2) + { + state = _optimum[_optimum[cur]._posPrev2]._state; + if (_optimum[cur]._backPrev2 < Base.K_NUM_REP_DISTANCES) + { + state.UpdateRep(); + } + else + { + state.UpdateMatch(); + } + } + else + { + state = _optimum[posPrev]._state; + } + state.UpdateChar(); + } + else + { + state = _optimum[posPrev]._state; + } + if (posPrev == cur - 1) + { + if (_optimum[cur].IsShortRep()) + { + state.UpdateShortRep(); + } + else + { + state.UpdateChar(); + } + } + else + { + UInt32 pos; + if (_optimum[cur]._prev1IsChar && _optimum[cur]._prev2) + { + posPrev = _optimum[cur]._posPrev2; + pos = _optimum[cur]._backPrev2; + state.UpdateRep(); + } + else + { + pos = _optimum[cur]._backPrev; + if (pos < Base.K_NUM_REP_DISTANCES) + { + state.UpdateRep(); + } + else + { + state.UpdateMatch(); + } + } + Optimal opt = _optimum[posPrev]; + if (pos < 
Base.K_NUM_REP_DISTANCES) + { + if (pos == 0) + { + _reps[0] = opt._backs0; + _reps[1] = opt._backs1; + _reps[2] = opt._backs2; + _reps[3] = opt._backs3; + } + else if (pos == 1) + { + _reps[0] = opt._backs1; + _reps[1] = opt._backs0; + _reps[2] = opt._backs2; + _reps[3] = opt._backs3; + } + else if (pos == 2) + { + _reps[0] = opt._backs2; + _reps[1] = opt._backs0; + _reps[2] = opt._backs1; + _reps[3] = opt._backs3; + } + else + { + _reps[0] = opt._backs3; + _reps[1] = opt._backs0; + _reps[2] = opt._backs1; + _reps[3] = opt._backs2; + } + } + else + { + _reps[0] = (pos - Base.K_NUM_REP_DISTANCES); + _reps[1] = opt._backs0; + _reps[2] = opt._backs1; + _reps[3] = opt._backs2; + } + } + _optimum[cur]._state = state; + _optimum[cur]._backs0 = _reps[0]; + _optimum[cur]._backs1 = _reps[1]; + _optimum[cur]._backs2 = _reps[2]; + _optimum[cur]._backs3 = _reps[3]; + UInt32 curPrice = _optimum[cur]._price; + + currentByte = _matchFinder.GetIndexByte(0 - 1); + matchByte = _matchFinder.GetIndexByte((Int32)(0 - _reps[0] - 1 - 1)); + + posState = (position & _posStateMask); + + UInt32 curAnd1Price = curPrice + + _isMatch[(state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].GetPrice0() + + _literalEncoder.GetSubCoder(position, _matchFinder.GetIndexByte(0 - 2)). 
+ GetPrice(!state.IsCharState(), matchByte, currentByte); + + Optimal nextOptimum = _optimum[cur + 1]; + + bool nextIsChar = false; + if (curAnd1Price < nextOptimum._price) + { + nextOptimum._price = curAnd1Price; + nextOptimum._posPrev = cur; + nextOptimum.MakeAsChar(); + nextIsChar = true; + } + + matchPrice = curPrice + _isMatch[(state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].GetPrice1(); + repMatchPrice = matchPrice + _isRep[state._index].GetPrice1(); + + if (matchByte == currentByte && + !(nextOptimum._posPrev < cur && nextOptimum._backPrev == 0)) + { + UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(state, posState); + if (shortRepPrice <= nextOptimum._price) + { + nextOptimum._price = shortRepPrice; + nextOptimum._posPrev = cur; + nextOptimum.MakeAsShortRep(); + nextIsChar = true; + } + } + + UInt32 numAvailableBytesFull = _matchFinder.GetNumAvailableBytes() + 1; + numAvailableBytesFull = Math.Min(K_NUM_OPTS - 1 - cur, numAvailableBytesFull); + numAvailableBytes = numAvailableBytesFull; + + if (numAvailableBytes < 2) + { + continue; + } + if (numAvailableBytes > _numFastBytes) + { + numAvailableBytes = _numFastBytes; + } + if (!nextIsChar && matchByte != currentByte) + { + // try Literal + rep0 + UInt32 t = Math.Min(numAvailableBytesFull - 1, _numFastBytes); + UInt32 lenTest2 = _matchFinder.GetMatchLen(0, _reps[0], t); + if (lenTest2 >= 2) + { + Base.State state2 = state; + state2.UpdateChar(); + UInt32 posStateNext = (position + 1) & _posStateMask; + UInt32 nextRepMatchPrice = curAnd1Price + + _isMatch[(state2._index << Base.K_NUM_POS_STATES_BITS_MAX) + posStateNext] + .GetPrice1() + + _isRep[state2._index].GetPrice1(); + { + UInt32 offset = cur + 1 + lenTest2; + while (lenEnd < offset) + { + _optimum[++lenEnd]._price = K_IFINITY_PRICE; + } + UInt32 curAndLenPrice = nextRepMatchPrice + GetRepPrice( + 0, lenTest2, state2, posStateNext); + Optimal optimum = _optimum[offset]; + if (curAndLenPrice < optimum._price) + { + optimum._price = 
curAndLenPrice; + optimum._posPrev = cur + 1; + optimum._backPrev = 0; + optimum._prev1IsChar = true; + optimum._prev2 = false; + } + } + } + } + + UInt32 startLen = 2; // speed optimization + + for (UInt32 repIndex = 0; repIndex < Base.K_NUM_REP_DISTANCES; repIndex++) + { + UInt32 lenTest = _matchFinder.GetMatchLen(0 - 1, _reps[repIndex], numAvailableBytes); + if (lenTest < 2) + { + continue; + } + UInt32 lenTestTemp = lenTest; + do + { + while (lenEnd < cur + lenTest) + { + _optimum[++lenEnd]._price = K_IFINITY_PRICE; + } + UInt32 curAndLenPrice = repMatchPrice + GetRepPrice(repIndex, lenTest, state, posState); + Optimal optimum = _optimum[cur + lenTest]; + if (curAndLenPrice < optimum._price) + { + optimum._price = curAndLenPrice; + optimum._posPrev = cur; + optimum._backPrev = repIndex; + optimum._prev1IsChar = false; + } + } + while (--lenTest >= 2); + lenTest = lenTestTemp; + + if (repIndex == 0) + { + startLen = lenTest + 1; + } + + // if (_maxMode) + if (lenTest < numAvailableBytesFull) + { + UInt32 t = Math.Min(numAvailableBytesFull - 1 - lenTest, _numFastBytes); + UInt32 lenTest2 = _matchFinder.GetMatchLen((Int32)lenTest, _reps[repIndex], t); + if (lenTest2 >= 2) + { + Base.State state2 = state; + state2.UpdateRep(); + UInt32 posStateNext = (position + lenTest) & _posStateMask; + UInt32 curAndLenCharPrice = + repMatchPrice + GetRepPrice(repIndex, lenTest, state, posState) + + _isMatch[(state2._index << Base.K_NUM_POS_STATES_BITS_MAX) + posStateNext].GetPrice0() + + _literalEncoder.GetSubCoder(position + lenTest, + _matchFinder.GetIndexByte((Int32)lenTest - 1 - 1)) + .GetPrice(true, + _matchFinder.GetIndexByte( + (Int32)lenTest - 1 - (Int32)(_reps[repIndex] + 1)), + _matchFinder.GetIndexByte((Int32)lenTest - 1)); + state2.UpdateChar(); + posStateNext = (position + lenTest + 1) & _posStateMask; + UInt32 nextMatchPrice = curAndLenCharPrice + + _isMatch[(state2._index << Base.K_NUM_POS_STATES_BITS_MAX) + posStateNext] + .GetPrice1(); + UInt32 
nextRepMatchPrice = nextMatchPrice + _isRep[state2._index].GetPrice1(); + + // for(; lenTest2 >= 2; lenTest2--) + { + UInt32 offset = lenTest + 1 + lenTest2; + while (lenEnd < cur + offset) + { + _optimum[++lenEnd]._price = K_IFINITY_PRICE; + } + UInt32 curAndLenPrice = nextRepMatchPrice + + GetRepPrice(0, lenTest2, state2, posStateNext); + Optimal optimum = _optimum[cur + offset]; + if (curAndLenPrice < optimum._price) + { + optimum._price = curAndLenPrice; + optimum._posPrev = cur + lenTest + 1; + optimum._backPrev = 0; + optimum._prev1IsChar = true; + optimum._prev2 = true; + optimum._posPrev2 = cur; + optimum._backPrev2 = repIndex; + } + } + } + } + } + + if (newLen > numAvailableBytes) + { + newLen = numAvailableBytes; + for (numDistancePairs = 0; newLen > _matchDistances[numDistancePairs]; numDistancePairs += 2) + { + ; + } + _matchDistances[numDistancePairs] = newLen; + numDistancePairs += 2; + } + if (newLen >= startLen) + { + normalMatchPrice = matchPrice + _isRep[state._index].GetPrice0(); + while (lenEnd < cur + newLen) + { + _optimum[++lenEnd]._price = K_IFINITY_PRICE; + } + + UInt32 offs = 0; + while (startLen > _matchDistances[offs]) + { + offs += 2; + } + + for (UInt32 lenTest = startLen;; lenTest++) + { + UInt32 curBack = _matchDistances[offs + 1]; + UInt32 curAndLenPrice = normalMatchPrice + GetPosLenPrice(curBack, lenTest, posState); + Optimal optimum = _optimum[cur + lenTest]; + if (curAndLenPrice < optimum._price) + { + optimum._price = curAndLenPrice; + optimum._posPrev = cur; + optimum._backPrev = curBack + Base.K_NUM_REP_DISTANCES; + optimum._prev1IsChar = false; + } + + if (lenTest == _matchDistances[offs]) + { + if (lenTest < numAvailableBytesFull) + { + UInt32 t = Math.Min(numAvailableBytesFull - 1 - lenTest, _numFastBytes); + UInt32 lenTest2 = _matchFinder.GetMatchLen((Int32)lenTest, curBack, t); + if (lenTest2 >= 2) + { + Base.State state2 = state; + state2.UpdateMatch(); + UInt32 posStateNext = (position + lenTest) & _posStateMask; + 
UInt32 curAndLenCharPrice = curAndLenPrice + + _isMatch[ + (state2._index << Base.K_NUM_POS_STATES_BITS_MAX) + + posStateNext].GetPrice0() + + _literalEncoder.GetSubCoder(position + lenTest, + _matchFinder.GetIndexByte( + (Int32)lenTest - 1 - 1)) + . + GetPrice(true, + _matchFinder.GetIndexByte( + (Int32)lenTest - + (Int32)(curBack + 1) - 1), + _matchFinder.GetIndexByte( + (Int32)lenTest - 1)); + state2.UpdateChar(); + posStateNext = (position + lenTest + 1) & _posStateMask; + UInt32 nextMatchPrice = curAndLenCharPrice + + _isMatch[ + (state2._index << Base.K_NUM_POS_STATES_BITS_MAX) + + posStateNext].GetPrice1(); + UInt32 nextRepMatchPrice = nextMatchPrice + _isRep[state2._index].GetPrice1(); + + UInt32 offset = lenTest + 1 + lenTest2; + while (lenEnd < cur + offset) + { + _optimum[++lenEnd]._price = K_IFINITY_PRICE; + } + curAndLenPrice = nextRepMatchPrice + GetRepPrice(0, lenTest2, state2, posStateNext); + optimum = _optimum[cur + offset]; + if (curAndLenPrice < optimum._price) + { + optimum._price = curAndLenPrice; + optimum._posPrev = cur + lenTest + 1; + optimum._backPrev = 0; + optimum._prev1IsChar = true; + optimum._prev2 = true; + optimum._posPrev2 = cur; + optimum._backPrev2 = curBack + Base.K_NUM_REP_DISTANCES; + } + } + } + offs += 2; + if (offs == numDistancePairs) + { + break; + } + } + } + } + } + } + + private bool ChangePair(UInt32 smallDist, UInt32 bigDist) + { + const int kDif = 7; + return (smallDist < ((UInt32)(1) << (32 - kDif)) && bigDist >= (smallDist << kDif)); + } + + private void WriteEndMarker(UInt32 posState) + { + if (!_writeEndMark) + { + return; + } + + _isMatch[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].Encode(_rangeEncoder, 1); + _isRep[_state._index].Encode(_rangeEncoder, 0); + _state.UpdateMatch(); + UInt32 len = Base.K_MATCH_MIN_LEN; + _lenEncoder.Encode(_rangeEncoder, len - Base.K_MATCH_MIN_LEN, posState); + UInt32 posSlot = (1 << Base.K_NUM_POS_SLOT_BITS) - 1; + UInt32 lenToPosState = 
Base.GetLenToPosState(len); + _posSlotEncoder[lenToPosState].Encode(_rangeEncoder, posSlot); + int footerBits = 30; + UInt32 posReduced = (((UInt32)1) << footerBits) - 1; + _rangeEncoder.EncodeDirectBits(posReduced >> Base.K_NUM_ALIGN_BITS, footerBits - Base.K_NUM_ALIGN_BITS); + _posAlignEncoder.ReverseEncode(_rangeEncoder, posReduced & Base.K_ALIGN_MASK); + } + + private void Flush(UInt32 nowPos) + { + ReleaseMfStream(); + WriteEndMarker(nowPos & _posStateMask); + _rangeEncoder.FlushData(); + _rangeEncoder.FlushStream(); + } + + public void CodeOneBlock(out Int64 inSize, out Int64 outSize, out bool finished) + { + inSize = 0; + outSize = 0; + finished = true; + + if (_inStream != null) + { + _matchFinder.SetStream(_inStream); + _needReleaseMfStream = true; + _inStream = null; + } + + if (_finished) + { + return; + } + _finished = true; + + Int64 progressPosValuePrev = _nowPos64; + if (_nowPos64 == 0) + { + if (_trainSize > 0) + { + for (; _trainSize > 0 && (!_processingMode || !_matchFinder.IsDataStarved); _trainSize--) + { + _matchFinder.Skip(1); + } + if (_trainSize == 0) + { + _previousByte = _matchFinder.GetIndexByte(-1); + } + } + if (_processingMode && _matchFinder.IsDataStarved) + { + _finished = false; + return; + } + if (_matchFinder.GetNumAvailableBytes() == 0) + { + Flush((UInt32)_nowPos64); + return; + } + UInt32 len, numDistancePairs; // it's not used + ReadMatchDistances(out len, out numDistancePairs); + UInt32 posState = (UInt32)(_nowPos64) & _posStateMask; + _isMatch[(_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState].Encode(_rangeEncoder, 0); + _state.UpdateChar(); + Byte curByte = _matchFinder.GetIndexByte((Int32)(0 - _additionalOffset)); + _literalEncoder.GetSubCoder((UInt32)(_nowPos64), _previousByte).Encode(_rangeEncoder, curByte); + _previousByte = curByte; + _additionalOffset--; + _nowPos64++; + } + if (_processingMode && _matchFinder.IsDataStarved) + { + _finished = false; + return; + } + if (_matchFinder.GetNumAvailableBytes() == 
0) + { + Flush((UInt32)_nowPos64); + return; + } + while (true) + { + if (_processingMode && _matchFinder.IsDataStarved) + { + _finished = false; + return; + } + + UInt32 pos; + UInt32 len = GetOptimum((UInt32)_nowPos64, out pos); + + UInt32 posState = ((UInt32)_nowPos64) & _posStateMask; + UInt32 complexState = (_state._index << Base.K_NUM_POS_STATES_BITS_MAX) + posState; + if (len == 1 && pos == 0xFFFFFFFF) + { + _isMatch[complexState].Encode(_rangeEncoder, 0); + Byte curByte = _matchFinder.GetIndexByte((Int32)(0 - _additionalOffset)); + LiteralEncoder.Encoder2 subCoder = _literalEncoder.GetSubCoder((UInt32)_nowPos64, _previousByte); + if (!_state.IsCharState()) + { + Byte matchByte = + _matchFinder.GetIndexByte((Int32)(0 - _repDistances[0] - 1 - _additionalOffset)); + subCoder.EncodeMatched(_rangeEncoder, matchByte, curByte); + } + else + { + subCoder.Encode(_rangeEncoder, curByte); + } + _previousByte = curByte; + _state.UpdateChar(); + } + else + { + _isMatch[complexState].Encode(_rangeEncoder, 1); + if (pos < Base.K_NUM_REP_DISTANCES) + { + _isRep[_state._index].Encode(_rangeEncoder, 1); + if (pos == 0) + { + _isRepG0[_state._index].Encode(_rangeEncoder, 0); + if (len == 1) + { + _isRep0Long[complexState].Encode(_rangeEncoder, 0); + } + else + { + _isRep0Long[complexState].Encode(_rangeEncoder, 1); + } + } + else + { + _isRepG0[_state._index].Encode(_rangeEncoder, 1); + if (pos == 1) + { + _isRepG1[_state._index].Encode(_rangeEncoder, 0); + } + else + { + _isRepG1[_state._index].Encode(_rangeEncoder, 1); + _isRepG2[_state._index].Encode(_rangeEncoder, pos - 2); + } + } + if (len == 1) + { + _state.UpdateShortRep(); + } + else + { + _repMatchLenEncoder.Encode(_rangeEncoder, len - Base.K_MATCH_MIN_LEN, posState); + _state.UpdateRep(); + } + UInt32 distance = _repDistances[pos]; + if (pos != 0) + { + for (UInt32 i = pos; i >= 1; i--) + { + _repDistances[i] = _repDistances[i - 1]; + } + _repDistances[0] = distance; + } + } + else + { + 
_isRep[_state._index].Encode(_rangeEncoder, 0); + _state.UpdateMatch(); + _lenEncoder.Encode(_rangeEncoder, len - Base.K_MATCH_MIN_LEN, posState); + pos -= Base.K_NUM_REP_DISTANCES; + UInt32 posSlot = GetPosSlot(pos); + UInt32 lenToPosState = Base.GetLenToPosState(len); + _posSlotEncoder[lenToPosState].Encode(_rangeEncoder, posSlot); + + if (posSlot >= Base.K_START_POS_MODEL_INDEX) + { + int footerBits = (int)((posSlot >> 1) - 1); + UInt32 baseVal = ((2 | (posSlot & 1)) << footerBits); + UInt32 posReduced = pos - baseVal; + + if (posSlot < Base.K_END_POS_MODEL_INDEX) + { + BitTreeEncoder.ReverseEncode(_posEncoders, + baseVal - posSlot - 1, _rangeEncoder, footerBits, + posReduced); + } + else + { + _rangeEncoder.EncodeDirectBits(posReduced >> Base.K_NUM_ALIGN_BITS, + footerBits - Base.K_NUM_ALIGN_BITS); + _posAlignEncoder.ReverseEncode(_rangeEncoder, posReduced & Base.K_ALIGN_MASK); + _alignPriceCount++; + } + } + UInt32 distance = pos; + for (UInt32 i = Base.K_NUM_REP_DISTANCES - 1; i >= 1; i--) + { + _repDistances[i] = _repDistances[i - 1]; + } + _repDistances[0] = distance; + _matchPriceCount++; + } + _previousByte = _matchFinder.GetIndexByte((Int32)(len - 1 - _additionalOffset)); + } + _additionalOffset -= len; + _nowPos64 += len; + if (_additionalOffset == 0) + { + // if (!_fastMode) + if (_matchPriceCount >= (1 << 7)) + { + FillDistancesPrices(); + } + if (_alignPriceCount >= Base.K_ALIGN_TABLE_SIZE) + { + FillAlignPrices(); + } + inSize = _nowPos64; + outSize = _rangeEncoder.GetProcessedSizeAdd(); + if (_processingMode && _matchFinder.IsDataStarved) + { + _finished = false; + return; + } + if (_matchFinder.GetNumAvailableBytes() == 0) + { + Flush((UInt32)_nowPos64); + return; + } + + if (_nowPos64 - progressPosValuePrev >= (1 << 12)) + { + _finished = false; + finished = false; + return; + } + } + } + } + + private void ReleaseMfStream() + { + if (_matchFinder != null && _needReleaseMfStream) + { + _matchFinder.ReleaseStream(); + _needReleaseMfStream = false; 
+ } + } + + private void SetOutStream(Stream outStream) + { + _rangeEncoder.SetStream(outStream); + } + + private void ReleaseOutStream() + { + _rangeEncoder.ReleaseStream(); + } + + private void ReleaseStreams() + { + ReleaseMfStream(); + ReleaseOutStream(); + } + + public void SetStreams(Stream inStream, Stream outStream, + Int64 inSize, Int64 outSize) + { + _inStream = inStream; + _finished = false; + Create(); + SetOutStream(outStream); + Init(); + _matchFinder.Init(); + + // if (!_fastMode) + { + FillDistancesPrices(); + FillAlignPrices(); + } + + _lenEncoder.SetTableSize(_numFastBytes + 1 - Base.K_MATCH_MIN_LEN); + _lenEncoder.UpdateTables((UInt32)1 << _posStateBits); + _repMatchLenEncoder.SetTableSize(_numFastBytes + 1 - Base.K_MATCH_MIN_LEN); + _repMatchLenEncoder.UpdateTables((UInt32)1 << _posStateBits); + + _nowPos64 = 0; + } + + public void Code(Stream inStream, Stream outStream, + Int64 inSize, Int64 outSize, ICodeProgress progress) + { + _needReleaseMfStream = false; + _processingMode = false; + try + { + SetStreams(inStream, outStream, inSize, outSize); + while (true) + { + Int64 processedInSize; + Int64 processedOutSize; + bool finished; + CodeOneBlock(out processedInSize, out processedOutSize, out finished); + if (finished) + { + return; + } + if (progress != null) + { + progress.SetProgress(processedInSize, processedOutSize); + } + } + } + finally + { + ReleaseStreams(); + } + } + + public long Code(Stream inStream, bool final) + { + _matchFinder.SetStream(inStream); + _processingMode = !final; + try + { + while (true) + { + Int64 processedInSize; + Int64 processedOutSize; + bool finished; + CodeOneBlock(out processedInSize, out processedOutSize, out finished); + if (finished) + { + return processedInSize; + } + } + } + finally + { + _matchFinder.ReleaseStream(); + if (final) + { + ReleaseStreams(); + } + } + } + + public void Train(Stream trainStream) + { + if (_nowPos64 > 0) + { + throw new InvalidOperationException(); + } + _trainSize = 
(uint)trainStream.Length; + if (_trainSize > 0) + { + _matchFinder.SetStream(trainStream); + for (; _trainSize > 0 && !_matchFinder.IsDataStarved; _trainSize--) + { + _matchFinder.Skip(1); + } + if (_trainSize == 0) + { + _previousByte = _matchFinder.GetIndexByte(-1); + } + _matchFinder.ReleaseStream(); + } + } + + private const int K_PROP_SIZE = 5; + private readonly Byte[] _properties = new Byte[K_PROP_SIZE]; + + public void WriteCoderProperties(Stream outStream) + { + _properties[0] = (Byte)((_posStateBits * 5 + _numLiteralPosStateBits) * 9 + _numLiteralContextBits); + for (int i = 0; i < 4; i++) + { + _properties[1 + i] = (Byte)((_dictionarySize >> (8 * i)) & 0xFF); + } + outStream.Write(_properties, 0, K_PROP_SIZE); + } + + private readonly UInt32[] _tempPrices = new UInt32[Base.K_NUM_FULL_DISTANCES]; + private UInt32 _matchPriceCount; + + private void FillDistancesPrices() + { + for (UInt32 i = Base.K_START_POS_MODEL_INDEX; i < Base.K_NUM_FULL_DISTANCES; i++) + { + UInt32 posSlot = GetPosSlot(i); + int footerBits = (int)((posSlot >> 1) - 1); + UInt32 baseVal = ((2 | (posSlot & 1)) << footerBits); + _tempPrices[i] = BitTreeEncoder.ReverseGetPrice(_posEncoders, + baseVal - posSlot - 1, footerBits, i - baseVal); + } + + for (UInt32 lenToPosState = 0; lenToPosState < Base.K_NUM_LEN_TO_POS_STATES; lenToPosState++) + { + UInt32 posSlot; + BitTreeEncoder encoder = _posSlotEncoder[lenToPosState]; + + UInt32 st = (lenToPosState << Base.K_NUM_POS_SLOT_BITS); + for (posSlot = 0; posSlot < _distTableSize; posSlot++) + { + _posSlotPrices[st + posSlot] = encoder.GetPrice(posSlot); + } + for (posSlot = Base.K_END_POS_MODEL_INDEX; posSlot < _distTableSize; posSlot++) + { + _posSlotPrices[st + posSlot] += ((((posSlot >> 1) - 1) - Base.K_NUM_ALIGN_BITS) << + BitEncoder.K_NUM_BIT_PRICE_SHIFT_BITS); + } + + UInt32 st2 = lenToPosState * Base.K_NUM_FULL_DISTANCES; + UInt32 i; + for (i = 0; i < Base.K_START_POS_MODEL_INDEX; i++) + { + _distancesPrices[st2 + i] = _posSlotPrices[st + 
i]; + } + for (; i < Base.K_NUM_FULL_DISTANCES; i++) + { + _distancesPrices[st2 + i] = _posSlotPrices[st + GetPosSlot(i)] + _tempPrices[i]; + } + } + _matchPriceCount = 0; + } + + private void FillAlignPrices() + { + for (UInt32 i = 0; i < Base.K_ALIGN_TABLE_SIZE; i++) + { + _alignPrices[i] = _posAlignEncoder.ReverseGetPrice(i); + } + _alignPriceCount = 0; + } + + private static readonly string[] K_MATCH_FINDER_I_DS = + { + "BT2", + "BT4" + }; + + private static int FindMatchFinder(string s) + { + for (int m = 0; m < K_MATCH_FINDER_I_DS.Length; m++) + { + if (s == K_MATCH_FINDER_I_DS[m]) + { + return m; + } + } + return -1; + } + + public void SetCoderProperties(CoderPropId[] propIDs, object[] properties) + { + for (UInt32 i = 0; i < properties.Length; i++) + { + object prop = properties[i]; + switch (propIDs[i]) + { + case CoderPropId.NumFastBytes: + { + if (!(prop is Int32)) + { + throw new InvalidParamException(); + } + Int32 numFastBytes = (Int32)prop; + if (numFastBytes < 5 || numFastBytes > Base.K_MATCH_MAX_LEN) + { + throw new InvalidParamException(); + } + _numFastBytes = (UInt32)numFastBytes; + break; + } + case CoderPropId.Algorithm: + { + /* + if (!(prop is Int32)) + throw new InvalidParamException(); + Int32 maximize = (Int32)prop; + _fastMode = (maximize == 0); + _maxMode = (maximize >= 2); + */ + break; + } + case CoderPropId.MatchFinder: + { + if (!(prop is String)) + { + throw new InvalidParamException(); + } + EMatchFinderType matchFinderIndexPrev = _matchFinderType; + int m = FindMatchFinder(((string)prop).ToUpper()); + if (m < 0) + { + throw new InvalidParamException(); + } + _matchFinderType = (EMatchFinderType)m; + if (_matchFinder != null && matchFinderIndexPrev != _matchFinderType) + { + _dictionarySizePrev = 0xFFFFFFFF; + _matchFinder = null; + } + break; + } + case CoderPropId.DictionarySize: + { + const int kDicLogSizeMaxCompress = 30; + if (!(prop is Int32)) + { + throw new InvalidParamException(); + } + ; + Int32 dictionarySize = 
(Int32)prop; + if (dictionarySize < (UInt32)(1 << Base.K_DIC_LOG_SIZE_MIN) || + dictionarySize > (UInt32)(1 << kDicLogSizeMaxCompress)) + { + throw new InvalidParamException(); + } + _dictionarySize = (UInt32)dictionarySize; + int dicLogSize; + for (dicLogSize = 0; dicLogSize < (UInt32)kDicLogSizeMaxCompress; dicLogSize++) + { + if (dictionarySize <= ((UInt32)(1) << dicLogSize)) + { + break; + } + } + _distTableSize = (UInt32)dicLogSize * 2; + break; + } + case CoderPropId.PosStateBits: + { + if (!(prop is Int32)) + { + throw new InvalidParamException(); + } + Int32 v = (Int32)prop; + if (v < 0 || v > (UInt32)Base.K_NUM_POS_STATES_BITS_ENCODING_MAX) + { + throw new InvalidParamException(); + } + _posStateBits = v; + _posStateMask = (((UInt32)1) << _posStateBits) - 1; + break; + } + case CoderPropId.LitPosBits: + { + if (!(prop is Int32)) + { + throw new InvalidParamException(); + } + Int32 v = (Int32)prop; + if (v < 0 || v > Base.K_NUM_LIT_POS_STATES_BITS_ENCODING_MAX) + { + throw new InvalidParamException(); + } + _numLiteralPosStateBits = v; + break; + } + case CoderPropId.LitContextBits: + { + if (!(prop is Int32)) + { + throw new InvalidParamException(); + } + Int32 v = (Int32)prop; + if (v < 0 || v > Base.K_NUM_LIT_CONTEXT_BITS_MAX) + { + throw new InvalidParamException(); + } + ; + _numLiteralContextBits = v; + break; + } + case CoderPropId.EndMarker: + { + if (!(prop is Boolean)) + { + throw new InvalidParamException(); + } + SetWriteEndMarkerMode((Boolean)prop); + break; + } + default: + throw new InvalidParamException(); + } + } + } + + private uint _trainSize; + + public void SetTrainSize(uint trainSize) + { + _trainSize = trainSize; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoderProperties.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoderProperties.cs new file mode 100644 index 0000000000..de7fa6f18a --- /dev/null +++ 
b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaEncoderProperties.cs @@ -0,0 +1,55 @@ +namespace SharpCompress.Compressors.LZMA +{ + public class LzmaEncoderProperties + { + internal CoderPropId[] _propIDs; + internal object[] _properties; + + public LzmaEncoderProperties() + : this(false) + { + } + + public LzmaEncoderProperties(bool eos) + : this(eos, 1 << 20) + { + } + + public LzmaEncoderProperties(bool eos, int dictionary) + : this(eos, dictionary, 32) + { + } + + public LzmaEncoderProperties(bool eos, int dictionary, int numFastBytes) + { + int posStateBits = 2; + int litContextBits = 3; + int litPosBits = 0; + int algorithm = 2; + string mf = "bt4"; + + _propIDs = new[] + { + CoderPropId.DictionarySize, + CoderPropId.PosStateBits, + CoderPropId.LitContextBits, + CoderPropId.LitPosBits, + CoderPropId.Algorithm, + CoderPropId.NumFastBytes, + CoderPropId.MatchFinder, + CoderPropId.EndMarker + }; + _properties = new object[] + { + dictionary, + posStateBits, + litContextBits, + litPosBits, + algorithm, + numFastBytes, + mf, + eos + }; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaStream.cs new file mode 100644 index 0000000000..c074704fb8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/LzmaStream.cs @@ -0,0 +1,318 @@ +using System; +using System.IO; +using SharpCompress.Compressors.LZMA.LZ; +using SharpCompress.Converters; + +namespace SharpCompress.Compressors.LZMA +{ + public class LzmaStream : Stream + { + private readonly Stream _inputStream; + private readonly long _inputSize; + private readonly long _outputSize; + + private readonly int _dictionarySize; + private readonly OutWindow _outWindow = new OutWindow(); + private readonly RangeCoder.Decoder _rangeDecoder = new RangeCoder.Decoder(); + private Decoder _decoder; + + private long _position; + private bool _endReached; + private long 
_availableBytes; + private long _rangeDecoderLimit; + private long _inputPosition; + + // LZMA2 + private readonly bool _isLzma2; + private bool _uncompressedChunk; + private bool _needDictReset = true; + private bool _needProps = true; + + private readonly Encoder _encoder; + private bool _isDisposed; + + public LzmaStream(byte[] properties, Stream inputStream) + : this(properties, inputStream, -1, -1, null, properties.Length < 5) + { + } + + public LzmaStream(byte[] properties, Stream inputStream, long inputSize) + : this(properties, inputStream, inputSize, -1, null, properties.Length < 5) + { + } + + public LzmaStream(byte[] properties, Stream inputStream, long inputSize, long outputSize) + : this(properties, inputStream, inputSize, outputSize, null, properties.Length < 5) + { + } + + public LzmaStream(byte[] properties, Stream inputStream, long inputSize, long outputSize, + Stream presetDictionary, bool isLzma2) + { + _inputStream = inputStream; + _inputSize = inputSize; + _outputSize = outputSize; + _isLzma2 = isLzma2; + + if (!isLzma2) + { + _dictionarySize = DataConverter.LittleEndian.GetInt32(properties, 1); + _outWindow.Create(_dictionarySize); + if (presetDictionary != null) + { + _outWindow.Train(presetDictionary); + } + + _rangeDecoder.Init(inputStream); + + _decoder = new Decoder(); + _decoder.SetDecoderProperties(properties); + Properties = properties; + + _availableBytes = outputSize < 0 ? 
long.MaxValue : outputSize; + _rangeDecoderLimit = inputSize; + } + else + { + _dictionarySize = 2 | (properties[0] & 1); + _dictionarySize <<= (properties[0] >> 1) + 11; + + _outWindow.Create(_dictionarySize); + if (presetDictionary != null) + { + _outWindow.Train(presetDictionary); + _needDictReset = false; + } + + Properties = new byte[1]; + _availableBytes = 0; + } + } + + public LzmaStream(LzmaEncoderProperties properties, bool isLzma2, Stream outputStream) + : this(properties, isLzma2, null, outputStream) + { + } + + public LzmaStream(LzmaEncoderProperties properties, bool isLzma2, Stream presetDictionary, Stream outputStream) + { + _isLzma2 = isLzma2; + _availableBytes = 0; + _endReached = true; + + if (isLzma2) + { + throw new NotImplementedException(); + } + + _encoder = new Encoder(); + _encoder.SetCoderProperties(properties._propIDs, properties._properties); + MemoryStream propStream = new MemoryStream(5); + _encoder.WriteCoderProperties(propStream); + Properties = propStream.ToArray(); + + _encoder.SetStreams(null, outputStream, -1, -1); + if (presetDictionary != null) + { + _encoder.Train(presetDictionary); + } + } + + public override bool CanRead => _encoder == null; + + public override bool CanSeek => false; + + public override bool CanWrite => _encoder != null; + + public override void Flush() + { + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + if (disposing) + { + if (_encoder != null) + { + _position = _encoder.Code(null, true); + } + _inputStream?.Dispose(); + } + base.Dispose(disposing); + } + + public override long Length => _position + _availableBytes; + + public override long Position { get => _position; set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + if (_endReached) + { + return 0; + } + + int total = 0; + while (total < count) + { + if (_availableBytes == 0) + { + if (_isLzma2) + { + 
DecodeChunkHeader(); + } + else + { + _endReached = true; + } + if (_endReached) + { + break; + } + } + + int toProcess = count - total; + if (toProcess > _availableBytes) + { + toProcess = (int)_availableBytes; + } + + _outWindow.SetLimit(toProcess); + if (_uncompressedChunk) + { + _inputPosition += _outWindow.CopyStream(_inputStream, toProcess); + } + else if (_decoder.Code(_dictionarySize, _outWindow, _rangeDecoder) + && _outputSize < 0) + { + _availableBytes = _outWindow.AvailableBytes; + } + + int read = _outWindow.Read(buffer, offset, toProcess); + total += read; + offset += read; + _position += read; + _availableBytes -= read; + + if (_availableBytes == 0 && !_uncompressedChunk) + { + _rangeDecoder.ReleaseStream(); + if (!_rangeDecoder.IsFinished || (_rangeDecoderLimit >= 0 && _rangeDecoder._total != _rangeDecoderLimit)) + { + throw new DataErrorException(); + } + _inputPosition += _rangeDecoder._total; + if (_outWindow.HasPending) + { + throw new DataErrorException(); + } + } + } + + if (_endReached) + { + if (_inputSize >= 0 && _inputPosition != _inputSize) + { + throw new DataErrorException(); + } + if (_outputSize >= 0 && _position != _outputSize) + { + throw new DataErrorException(); + } + } + + return total; + } + + private void DecodeChunkHeader() + { + int control = _inputStream.ReadByte(); + _inputPosition++; + + if (control == 0x00) + { + _endReached = true; + return; + } + + if (control >= 0xE0 || control == 0x01) + { + _needProps = true; + _needDictReset = false; + _outWindow.Reset(); + } + else if (_needDictReset) + { + throw new DataErrorException(); + } + + if (control >= 0x80) + { + _uncompressedChunk = false; + + _availableBytes = (control & 0x1F) << 16; + _availableBytes += (_inputStream.ReadByte() << 8) + _inputStream.ReadByte() + 1; + _inputPosition += 2; + + _rangeDecoderLimit = (_inputStream.ReadByte() << 8) + _inputStream.ReadByte() + 1; + _inputPosition += 2; + + if (control >= 0xC0) + { + _needProps = false; + Properties[0] = 
(byte)_inputStream.ReadByte(); + _inputPosition++; + + _decoder = new Decoder(); + _decoder.SetDecoderProperties(Properties); + } + else if (_needProps) + { + throw new DataErrorException(); + } + else if (control >= 0xA0) + { + _decoder = new Decoder(); + _decoder.SetDecoderProperties(Properties); + } + + _rangeDecoder.Init(_inputStream); + } + else if (control > 0x02) + { + throw new DataErrorException(); + } + else + { + _uncompressedChunk = true; + _availableBytes = (_inputStream.ReadByte() << 8) + _inputStream.ReadByte() + 1; + _inputPosition += 2; + } + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + if (_encoder != null) + { + _position = _encoder.Code(new MemoryStream(buffer, offset, count), false); + } + } + + public byte[] Properties { get; } = new byte[5]; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoder.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoder.cs new file mode 100644 index 0000000000..f4ee15b3b6 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoder.cs @@ -0,0 +1,252 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.LZMA.RangeCoder +{ + internal class Encoder + { + public const uint K_TOP_VALUE = (1 << 24); + + private Stream _stream; + + public UInt64 _low; + public uint _range; + private uint _cacheSize; + private byte _cache; + + //long StartPosition; + + public void SetStream(Stream stream) + { + _stream = stream; + } + + public void ReleaseStream() + { + _stream = null; + } + + public void Init() + { + //StartPosition = Stream.Position; + + _low = 0; + _range = 0xFFFFFFFF; + _cacheSize = 1; + _cache = 0; + } + + public void FlushData() + { + for (int 
i = 0; i < 5; i++) + { + ShiftLow(); + } + } + + public void FlushStream() + { + _stream.Flush(); + } + + public void CloseStream() + { + _stream.Dispose(); + } + + public void Encode(uint start, uint size, uint total) + { + _low += start * (_range /= total); + _range *= size; + while (_range < K_TOP_VALUE) + { + _range <<= 8; + ShiftLow(); + } + } + + public void ShiftLow() + { + if ((uint)_low < 0xFF000000 || (uint)(_low >> 32) == 1) + { + byte temp = _cache; + do + { + _stream.WriteByte((byte)(temp + (_low >> 32))); + temp = 0xFF; + } + while (--_cacheSize != 0); + _cache = (byte)(((uint)_low) >> 24); + } + _cacheSize++; + _low = ((uint)_low) << 8; + } + + public void EncodeDirectBits(uint v, int numTotalBits) + { + for (int i = numTotalBits - 1; i >= 0; i--) + { + _range >>= 1; + if (((v >> i) & 1) == 1) + { + _low += _range; + } + if (_range < K_TOP_VALUE) + { + _range <<= 8; + ShiftLow(); + } + } + } + + public void EncodeBit(uint size0, int numTotalBits, uint symbol) + { + uint newBound = (_range >> numTotalBits) * size0; + if (symbol == 0) + { + _range = newBound; + } + else + { + _low += newBound; + _range -= newBound; + } + while (_range < K_TOP_VALUE) + { + _range <<= 8; + ShiftLow(); + } + } + + public long GetProcessedSizeAdd() + { + return -1; + + //return _cacheSize + Stream.Position - StartPosition + 4; + // (long)Stream.GetProcessedSize(); + } + } + + internal class Decoder + { + public const uint K_TOP_VALUE = (1 << 24); + public uint _range; + public uint _code; + + // public Buffer.InBuffer Stream = new Buffer.InBuffer(1 << 16); + public Stream _stream; + public long _total; + + public void Init(Stream stream) + { + // Stream.Init(stream); + _stream = stream; + + _code = 0; + _range = 0xFFFFFFFF; + for (int i = 0; i < 5; i++) + { + _code = (_code << 8) | (byte)_stream.ReadByte(); + } + _total = 5; + } + + public void ReleaseStream() + { + // Stream.ReleaseStream(); + _stream = null; + } + + public void CloseStream() + { + _stream.Dispose(); + } 
+ + public void Normalize() + { + while (_range < K_TOP_VALUE) + { + _code = (_code << 8) | (byte)_stream.ReadByte(); + _range <<= 8; + _total++; + } + } + + public void Normalize2() + { + if (_range < K_TOP_VALUE) + { + _code = (_code << 8) | (byte)_stream.ReadByte(); + _range <<= 8; + _total++; + } + } + + public uint GetThreshold(uint total) + { + return _code / (_range /= total); + } + + public void Decode(uint start, uint size) + { + _code -= start * _range; + _range *= size; + Normalize(); + } + + public uint DecodeDirectBits(int numTotalBits) + { + uint range = _range; + uint code = _code; + uint result = 0; + for (int i = numTotalBits; i > 0; i--) + { + range >>= 1; + /* + result <<= 1; + if (code >= range) + { + code -= range; + result |= 1; + } + */ + uint t = (code - range) >> 31; + code -= range & (t - 1); + result = (result << 1) | (1 - t); + + if (range < K_TOP_VALUE) + { + code = (code << 8) | (byte)_stream.ReadByte(); + range <<= 8; + _total++; + } + } + _range = range; + _code = code; + return result; + } + + public uint DecodeBit(uint size0, int numTotalBits) + { + uint newBound = (_range >> numTotalBits) * size0; + uint symbol; + if (_code < newBound) + { + symbol = 0; + _range = newBound; + } + else + { + symbol = 1; + _code -= newBound; + _range -= newBound; + } + Normalize(); + return symbol; + } + + public bool IsFinished => _code == 0; + + // ulong GetProcessedSize() {return Stream.GetProcessedSize(); } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBit.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBit.cs new file mode 100644 index 0000000000..2ab53aff6f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBit.cs @@ -0,0 +1,140 @@ +using System; + +namespace SharpCompress.Compressors.LZMA.RangeCoder +{ + internal struct BitEncoder + { + public const int K_NUM_BIT_MODEL_TOTAL_BITS = 11; + public 
const uint K_BIT_MODEL_TOTAL = (1 << K_NUM_BIT_MODEL_TOTAL_BITS); + private const int K_NUM_MOVE_BITS = 5; + private const int K_NUM_MOVE_REDUCING_BITS = 2; + public const int K_NUM_BIT_PRICE_SHIFT_BITS = 6; + + private uint _prob; + + public void Init() + { + _prob = K_BIT_MODEL_TOTAL >> 1; + } + + public void UpdateModel(uint symbol) + { + if (symbol == 0) + { + _prob += (K_BIT_MODEL_TOTAL - _prob) >> K_NUM_MOVE_BITS; + } + else + { + _prob -= (_prob) >> K_NUM_MOVE_BITS; + } + } + + public void Encode(Encoder encoder, uint symbol) + { + // encoder.EncodeBit(Prob, kNumBitModelTotalBits, symbol); + // UpdateModel(symbol); + uint newBound = (encoder._range >> K_NUM_BIT_MODEL_TOTAL_BITS) * _prob; + if (symbol == 0) + { + encoder._range = newBound; + _prob += (K_BIT_MODEL_TOTAL - _prob) >> K_NUM_MOVE_BITS; + } + else + { + encoder._low += newBound; + encoder._range -= newBound; + _prob -= (_prob) >> K_NUM_MOVE_BITS; + } + if (encoder._range < Encoder.K_TOP_VALUE) + { + encoder._range <<= 8; + encoder.ShiftLow(); + } + } + + private static readonly UInt32[] PROB_PRICES = new UInt32[K_BIT_MODEL_TOTAL >> K_NUM_MOVE_REDUCING_BITS]; + + static BitEncoder() + { + const int kNumBits = (K_NUM_BIT_MODEL_TOTAL_BITS - K_NUM_MOVE_REDUCING_BITS); + for (int i = kNumBits - 1; i >= 0; i--) + { + UInt32 start = (UInt32)1 << (kNumBits - i - 1); + UInt32 end = (UInt32)1 << (kNumBits - i); + for (UInt32 j = start; j < end; j++) + { + PROB_PRICES[j] = ((UInt32)i << K_NUM_BIT_PRICE_SHIFT_BITS) + + (((end - j) << K_NUM_BIT_PRICE_SHIFT_BITS) >> (kNumBits - i - 1)); + } + } + } + + public uint GetPrice(uint symbol) + { + return PROB_PRICES[(((_prob - symbol) ^ ((-(int)symbol))) & (K_BIT_MODEL_TOTAL - 1)) >> K_NUM_MOVE_REDUCING_BITS]; + } + + public uint GetPrice0() + { + return PROB_PRICES[_prob >> K_NUM_MOVE_REDUCING_BITS]; + } + + public uint GetPrice1() + { + return PROB_PRICES[(K_BIT_MODEL_TOTAL - _prob) >> K_NUM_MOVE_REDUCING_BITS]; + } + } + + internal struct BitDecoder + { + public 
const int K_NUM_BIT_MODEL_TOTAL_BITS = 11; + public const uint K_BIT_MODEL_TOTAL = (1 << K_NUM_BIT_MODEL_TOTAL_BITS); + private const int K_NUM_MOVE_BITS = 5; + + private uint _prob; + + public void UpdateModel(int numMoveBits, uint symbol) + { + if (symbol == 0) + { + _prob += (K_BIT_MODEL_TOTAL - _prob) >> numMoveBits; + } + else + { + _prob -= (_prob) >> numMoveBits; + } + } + + public void Init() + { + _prob = K_BIT_MODEL_TOTAL >> 1; + } + + public uint Decode(Decoder rangeDecoder) + { + uint newBound = (rangeDecoder._range >> K_NUM_BIT_MODEL_TOTAL_BITS) * _prob; + if (rangeDecoder._code < newBound) + { + rangeDecoder._range = newBound; + _prob += (K_BIT_MODEL_TOTAL - _prob) >> K_NUM_MOVE_BITS; + if (rangeDecoder._range < Decoder.K_TOP_VALUE) + { + rangeDecoder._code = (rangeDecoder._code << 8) | (byte)rangeDecoder._stream.ReadByte(); + rangeDecoder._range <<= 8; + rangeDecoder._total++; + } + return 0; + } + rangeDecoder._range -= newBound; + rangeDecoder._code -= newBound; + _prob -= (_prob) >> K_NUM_MOVE_BITS; + if (rangeDecoder._range < Decoder.K_TOP_VALUE) + { + rangeDecoder._code = (rangeDecoder._code << 8) | (byte)rangeDecoder._stream.ReadByte(); + rangeDecoder._range <<= 8; + rangeDecoder._total++; + } + return 1; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBitTree.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBitTree.cs new file mode 100644 index 0000000000..cc51160399 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/RangeCoder/RangeCoderBitTree.cs @@ -0,0 +1,163 @@ +using System; + +namespace SharpCompress.Compressors.LZMA.RangeCoder +{ + internal struct BitTreeEncoder + { + private readonly BitEncoder[] _models; + private readonly int _numBitLevels; + + public BitTreeEncoder(int numBitLevels) + { + _numBitLevels = numBitLevels; + _models = new BitEncoder[1 << numBitLevels]; + } + + public void Init() + { + for 
(uint i = 1; i < (1 << _numBitLevels); i++) + { + _models[i].Init(); + } + } + + public void Encode(Encoder rangeEncoder, UInt32 symbol) + { + UInt32 m = 1; + for (int bitIndex = _numBitLevels; bitIndex > 0;) + { + bitIndex--; + UInt32 bit = (symbol >> bitIndex) & 1; + _models[m].Encode(rangeEncoder, bit); + m = (m << 1) | bit; + } + } + + public void ReverseEncode(Encoder rangeEncoder, UInt32 symbol) + { + UInt32 m = 1; + for (UInt32 i = 0; i < _numBitLevels; i++) + { + UInt32 bit = symbol & 1; + _models[m].Encode(rangeEncoder, bit); + m = (m << 1) | bit; + symbol >>= 1; + } + } + + public UInt32 GetPrice(UInt32 symbol) + { + UInt32 price = 0; + UInt32 m = 1; + for (int bitIndex = _numBitLevels; bitIndex > 0;) + { + bitIndex--; + UInt32 bit = (symbol >> bitIndex) & 1; + price += _models[m].GetPrice(bit); + m = (m << 1) + bit; + } + return price; + } + + public UInt32 ReverseGetPrice(UInt32 symbol) + { + UInt32 price = 0; + UInt32 m = 1; + for (int i = _numBitLevels; i > 0; i--) + { + UInt32 bit = symbol & 1; + symbol >>= 1; + price += _models[m].GetPrice(bit); + m = (m << 1) | bit; + } + return price; + } + + public static UInt32 ReverseGetPrice(BitEncoder[] models, UInt32 startIndex, + int numBitLevels, UInt32 symbol) + { + UInt32 price = 0; + UInt32 m = 1; + for (int i = numBitLevels; i > 0; i--) + { + UInt32 bit = symbol & 1; + symbol >>= 1; + price += models[startIndex + m].GetPrice(bit); + m = (m << 1) | bit; + } + return price; + } + + public static void ReverseEncode(BitEncoder[] models, UInt32 startIndex, + Encoder rangeEncoder, int numBitLevels, UInt32 symbol) + { + UInt32 m = 1; + for (int i = 0; i < numBitLevels; i++) + { + UInt32 bit = symbol & 1; + models[startIndex + m].Encode(rangeEncoder, bit); + m = (m << 1) | bit; + symbol >>= 1; + } + } + } + + internal struct BitTreeDecoder + { + private readonly BitDecoder[] _models; + private readonly int _numBitLevels; + + public BitTreeDecoder(int numBitLevels) + { + _numBitLevels = numBitLevels; + _models 
= new BitDecoder[1 << numBitLevels]; + } + + public void Init() + { + for (uint i = 1; i < (1 << _numBitLevels); i++) + { + _models[i].Init(); + } + } + + public uint Decode(Decoder rangeDecoder) + { + uint m = 1; + for (int bitIndex = _numBitLevels; bitIndex > 0; bitIndex--) + { + m = (m << 1) + _models[m].Decode(rangeDecoder); + } + return m - ((uint)1 << _numBitLevels); + } + + public uint ReverseDecode(Decoder rangeDecoder) + { + uint m = 1; + uint symbol = 0; + for (int bitIndex = 0; bitIndex < _numBitLevels; bitIndex++) + { + uint bit = _models[m].Decode(rangeDecoder); + m <<= 1; + m += bit; + symbol |= (bit << bitIndex); + } + return symbol; + } + + public static uint ReverseDecode(BitDecoder[] models, UInt32 startIndex, + Decoder rangeDecoder, int numBitLevels) + { + uint m = 1; + uint symbol = 0; + for (int bitIndex = 0; bitIndex < numBitLevels; bitIndex++) + { + uint bit = models[startIndex + m].Decode(rangeDecoder); + m <<= 1; + m += bit; + symbol |= (bit << bitIndex); + } + return symbol; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Registry.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Registry.cs new file mode 100644 index 0000000000..edc02854e2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Registry.cs @@ -0,0 +1,58 @@ +using System; +using System.IO; +using System.Linq; +using SharpCompress.Common.SevenZip; +using SharpCompress.Compressors.BZip2; +using SharpCompress.Compressors.Deflate; +using SharpCompress.Compressors.Filters; +using SharpCompress.Compressors.LZMA.Utilites; +using SharpCompress.Compressors.PPMd; + +namespace SharpCompress.Compressors.LZMA +{ + internal static class DecoderRegistry + { + private const uint K_COPY = 0x0; + private const uint K_DELTA = 3; + private const uint K_LZMA2 = 0x21; + private const uint K_LZMA = 0x030101; + private const uint K_PPMD = 0x030401; + private const uint K_BCJ = 0x03030103; + private const uint K_BCJ2 
= 0x0303011B; + private const uint K_DEFLATE = 0x040108; + private const uint K_B_ZIP2 = 0x040202; + + internal static Stream CreateDecoderStream(CMethodId id, Stream[] inStreams, byte[] info, IPasswordProvider pass, + long limit) + { + switch (id._id) + { + case K_COPY: + if (info != null) + { + throw new NotSupportedException(); + } + return inStreams.Single(); + case K_LZMA: + case K_LZMA2: + return new LzmaStream(info, inStreams.Single(), -1, limit); +#if !NO_CRYPTO + case CMethodId.K_AES_ID: + return new AesDecoderStream(inStreams.Single(), info, pass, limit); +#endif + case K_BCJ: + return new BCJFilter(false, inStreams.Single()); + case K_BCJ2: + return new Bcj2DecoderStream(inStreams, info, limit); + case K_B_ZIP2: + return new BZip2Stream(inStreams.Single(), CompressionMode.Decompress, true); + case K_PPMD: + return new PpmdStream(new PpmdProperties(info), inStreams.Single(), false); + case K_DEFLATE: + return new DeflateStream(inStreams.Single(), CompressionMode.Decompress); + default: + throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcBuilderStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcBuilderStream.cs new file mode 100644 index 0000000000..4410dd9aaf --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcBuilderStream.cs @@ -0,0 +1,84 @@ +using System; +using System.IO; + +namespace SharpCompress.Compressors.LZMA.Utilites +{ + internal class CrcBuilderStream : Stream + { + private readonly Stream _mTarget; + private uint _mCrc; + private bool _mFinished; + private bool _isDisposed; + + public CrcBuilderStream(Stream target) + { + _mTarget = target; + _mCrc = Crc.INIT_CRC; + } + + protected override void Dispose(bool disposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + _mTarget.Dispose(); + base.Dispose(disposing); + } + + public long Processed { get; private set; } 
+ + public uint Finish() + { + if (!_mFinished) + { + _mFinished = true; + _mCrc = Crc.Finish(_mCrc); + } + + return _mCrc; + } + + public override bool CanRead => false; + + public override bool CanSeek => false; + + public override bool CanWrite => true; + + public override void Flush() + { + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + throw new InvalidOperationException(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + if (_mFinished) + { + throw new InvalidOperationException("CRC calculation has been finished."); + } + + Processed += count; + _mCrc = Crc.Update(_mCrc, buffer, offset, count); + _mTarget.Write(buffer, offset, count); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcCheckStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcCheckStream.cs new file mode 100644 index 0000000000..04b00f818f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/CrcCheckStream.cs @@ -0,0 +1,105 @@ +using System; +using System.Diagnostics; +using System.IO; + +namespace SharpCompress.Compressors.LZMA.Utilites +{ + internal class CrcCheckStream : Stream + { + private readonly uint _mExpectedCrc; + private uint _mCurrentCrc; + private bool _mClosed; + + private readonly long[] _mBytes = new long[256]; + private long _mLength; + + public CrcCheckStream(uint crc) + { + _mExpectedCrc = crc; + _mCurrentCrc = Crc.INIT_CRC; + } + + protected override void Dispose(bool disposing) + { + if (_mCurrentCrc != 
_mExpectedCrc) + { + throw new InvalidOperationException(); + } + try + { + if (disposing && !_mClosed) + { + _mClosed = true; + _mCurrentCrc = Crc.Finish(_mCurrentCrc); +#if DEBUG + if (_mCurrentCrc == _mExpectedCrc) + { + Debug.WriteLine("CRC ok: " + _mExpectedCrc.ToString("x8")); + } + else + { + Debugger.Break(); + Debug.WriteLine("bad CRC"); + } + + double lengthInv = 1.0 / _mLength; + double entropy = 0; + for (int i = 0; i < 256; i++) + { + if (_mBytes[i] != 0) + { + double p = lengthInv * _mBytes[i]; + entropy -= p * Math.Log(p, 256); + } + } + Debug.WriteLine("entropy: " + (int)(entropy * 100) + "%"); +#endif + } + } + finally + { + base.Dispose(disposing); + } + } + + public override bool CanRead => false; + + public override bool CanSeek => false; + + public override bool CanWrite => true; + + public override void Flush() + { + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + throw new InvalidOperationException(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + _mLength += count; + for (int i = 0; i < count; i++) + { + _mBytes[buffer[offset + i]]++; + } + + _mCurrentCrc = Crc.Update(_mCurrentCrc, buffer, offset, count); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/IPasswordProvider.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/IPasswordProvider.cs new file mode 100644 index 0000000000..51dc389e70 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/IPasswordProvider.cs @@ -0,0 +1,7 @@ +namespace 
SharpCompress.Compressors.LZMA.Utilites +{ + internal interface IPasswordProvider + { + string CryptoGetTextPassword(); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/Utils.cs b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/Utils.cs new file mode 100644 index 0000000000..9967e292e6 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/LZMA/Utilites/Utils.cs @@ -0,0 +1,92 @@ +using System; +using System.Diagnostics; +using System.IO; + +namespace SharpCompress.Compressors.LZMA.Utilites +{ + internal enum BlockType : byte + { + #region Constants + + End = 0, + Header = 1, + ArchiveProperties = 2, + AdditionalStreamsInfo = 3, + MainStreamsInfo = 4, + FilesInfo = 5, + PackInfo = 6, + UnpackInfo = 7, + SubStreamsInfo = 8, + Size = 9, + Crc = 10, + Folder = 11, + CodersUnpackSize = 12, + NumUnpackStream = 13, + EmptyStream = 14, + EmptyFile = 15, + Anti = 16, + Name = 17, + CTime = 18, + ATime = 19, + MTime = 20, + WinAttributes = 21, + Comment = 22, + EncodedHeader = 23, + StartPos = 24, + Dummy = 25 + + #endregion + } + + internal static class Utils + { + [Conditional("DEBUG")] + public static void Assert(bool expression) + { + if (!expression) + { + if (Debugger.IsAttached) + { + Debugger.Break(); + } + + throw new Exception("Assertion failed."); + } + } + + public static void ReadExact(this Stream stream, byte[] buffer, int offset, int length) + { + if (stream == null) + { + throw new ArgumentNullException(nameof(stream)); + } + + if (buffer == null) + { + throw new ArgumentNullException(nameof(buffer)); + } + + if (offset < 0 || offset > buffer.Length) + { + throw new ArgumentOutOfRangeException(nameof(offset)); + } + + if (length < 0 || length > buffer.Length - offset) + { + throw new ArgumentOutOfRangeException(nameof(length)); + } + + while (length > 0) + { + int fetched = stream.Read(buffer, offset, length); + if (fetched <= 0) + { + throw new EndOfStreamException(); + } 
+ + offset += fetched; + length -= fetched; + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/FreqData.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/FreqData.cs new file mode 100644 index 0000000000..1c01404b3c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/FreqData.cs @@ -0,0 +1,67 @@ +using System; +using System.Text; +using SharpCompress.Converters; + +namespace SharpCompress.Compressors.PPMd.H +{ + internal class FreqData : Pointer + { + internal const int SIZE = 6; + + // struct FreqData + // { + // ushort SummFreq; + // STATE _PACK_ATTR * Stats; + // }; + + internal FreqData(byte[] memory) + : base(memory) + { + } + + internal int SummFreq { get => DataConverter.LittleEndian.GetInt16(Memory, Address) & 0xffff; set => DataConverter.LittleEndian.PutBytes(Memory, Address, (short)value); } + + internal FreqData Initialize(byte[] mem) + { + return base.Initialize(mem); + } + + internal void IncrementSummFreq(int dSummFreq) + { + short summFreq = DataConverter.LittleEndian.GetInt16(Memory, Address); + summFreq += (short)dSummFreq; + DataConverter.LittleEndian.PutBytes(Memory, Address, summFreq); + } + + internal int GetStats() + { + return DataConverter.LittleEndian.GetInt32(Memory, Address + 2); + } + + internal virtual void SetStats(State state) + { + SetStats(state.Address); + } + + internal void SetStats(int state) + { + DataConverter.LittleEndian.PutBytes(Memory, Address + 2, state); + } + + public override String ToString() + { + StringBuilder buffer = new StringBuilder(); + buffer.Append("FreqData["); + buffer.Append("\n Address="); + buffer.Append(Address); + buffer.Append("\n size="); + buffer.Append(SIZE); + buffer.Append("\n summFreq="); + buffer.Append(SummFreq); + buffer.Append("\n stats="); + buffer.Append(GetStats()); + buffer.Append("\n]"); + return buffer.ToString(); + } + } +} \ No newline at end of file diff --git 
a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/ModelPPM.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/ModelPPM.cs new file mode 100644 index 0000000000..883aa062e8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/ModelPPM.cs @@ -0,0 +1,915 @@ +using System; +using System.IO; +using System.Text; +using SharpCompress.Compressors.Rar; +using Decoder = SharpCompress.Compressors.LZMA.RangeCoder.Decoder; + +namespace SharpCompress.Compressors.PPMd.H +{ + internal class ModelPpm + { + private void InitBlock() + { + for (int i = 0; i < 25; i++) + { + _see2Cont[i] = new See2Context[16]; + } + for (int i2 = 0; i2 < 128; i2++) + { + _binSumm[i2] = new int[64]; + } + } + + public SubAllocator SubAlloc { get; } = new SubAllocator(); + + public virtual See2Context DummySee2Cont => _dummySee2Cont; + + public virtual int InitRl => _initRl; + + public virtual int EscCount { get => _escCount; set => _escCount = value & 0xff; } + + public virtual int[] CharMask => _charMask; + + public virtual int NumMasked { get => _numMasked; set => _numMasked = value; } + + public virtual int PrevSuccess { get => _prevSuccess; set => _prevSuccess = value & 0xff; } + + public virtual int InitEsc { get => _initEsc; set => _initEsc = value; } + + public virtual int RunLength { get => _runLength; set => _runLength = value; } + + public virtual int HiBitsFlag { get => _hiBitsFlag; set => _hiBitsFlag = value & 0xff; } + + public virtual int[][] BinSumm => _binSumm; + + internal RangeCoder Coder { get; private set; } + + internal State FoundState { get; private set; } + + public virtual byte[] Heap => SubAlloc.Heap; + + public virtual int OrderFall => _orderFall; + + public const int MAX_O = 64; /* maximum allowed model order */ + + public const int INT_BITS = 7; + + public const int PERIOD_BITS = 7; + + //UPGRADE_NOTE: Final was removed from the declaration of 'TOT_BITS '. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + public static readonly int TOT_BITS = INT_BITS + PERIOD_BITS; + + //UPGRADE_NOTE: Final was removed from the declaration of 'INTERVAL '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + public static readonly int INTERVAL = 1 << INT_BITS; + + //UPGRADE_NOTE: Final was removed from the declaration of 'BIN_SCALE '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + public static readonly int BIN_SCALE = 1 << TOT_BITS; + + public const int MAX_FREQ = 124; + + private readonly See2Context[][] _see2Cont = new See2Context[25][]; + + private See2Context _dummySee2Cont; + + private PpmContext _minContext; //medContext + + private PpmContext _maxContext; + + private int _numMasked, _initEsc, _orderFall, _maxOrder, _runLength, _initRl; + + private readonly int[] _charMask = new int[256]; + + private readonly int[] _ns2Indx = new int[256]; + + private readonly int[] _ns2BsIndx = new int[256]; + + private readonly int[] _hb2Flag = new int[256]; + + // byte EscCount, PrevSuccess, HiBitsFlag; + private int _escCount, _prevSuccess, _hiBitsFlag; + + private readonly int[][] _binSumm = new int[128][]; // binary SEE-contexts + + private static readonly int[] INIT_BIN_ESC = {0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051}; + + // Temp fields + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState1 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState1 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState2 '. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState2 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState3 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState3 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState4 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState4 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempStateRef1 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly StateRef _tempStateRef1 = new StateRef(); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempStateRef2 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly StateRef _tempStateRef2 = new StateRef(); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempPPMContext1 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly PpmContext _tempPpmContext1 = new PpmContext(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempPPMContext2 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly PpmContext _tempPpmContext2 = new PpmContext(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempPPMContext3 '. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly PpmContext _tempPpmContext3 = new PpmContext(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempPPMContext4 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly PpmContext _tempPpmContext4 = new PpmContext(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'ps '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly int[] _ps = new int[MAX_O]; + + public ModelPpm() + { + InitBlock(); + _minContext = null; + _maxContext = null; + + //medContext = null; + } + + private void RestartModelRare() + { + Utility.Fill(_charMask, 0); + SubAlloc.InitSubAllocator(); + _initRl = -(_maxOrder < 12 ? _maxOrder : 12) - 1; + int addr = SubAlloc.AllocContext(); + _minContext.Address = addr; + _maxContext.Address = addr; + _minContext.SetSuffix(0); + _orderFall = _maxOrder; + _minContext.NumStats = 256; + _minContext.FreqData.SummFreq = _minContext.NumStats + 1; + + addr = SubAlloc.AllocUnits(256 / 2); + FoundState.Address = addr; + _minContext.FreqData.SetStats(addr); + + State state = new State(SubAlloc.Heap); + addr = _minContext.FreqData.GetStats(); + _runLength = _initRl; + _prevSuccess = 0; + for (int i = 0; i < 256; i++) + { + state.Address = addr + i * State.SIZE; + state.Symbol = i; + state.Freq = 1; + state.SetSuccessor(0); + } + + for (int i = 0; i < 128; i++) + { + for (int k = 0; k < 8; k++) + { + for (int m = 0; m < 64; m += 8) + { + _binSumm[i][k + m] = BIN_SCALE - INIT_BIN_ESC[k] / (i + 2); + } + } + } + for (int i = 0; i < 25; i++) + { + for (int k = 0; k < 16; k++) + { + _see2Cont[i][k].Initialize(5 * i + 10); + } + } + } + + private void StartModelRare(int maxOrder) + { + int i, k, m, step; + _escCount = 1; + _maxOrder = maxOrder; + 
RestartModelRare(); + + // Bug Fixed + _ns2BsIndx[0] = 0; + _ns2BsIndx[1] = 2; + for (int j = 0; j < 9; j++) + { + _ns2BsIndx[2 + j] = 4; + } + for (int j = 0; j < 256 - 11; j++) + { + _ns2BsIndx[11 + j] = 6; + } + for (i = 0; i < 3; i++) + { + _ns2Indx[i] = i; + } + for (m = i, k = 1, step = 1; i < 256; i++) + { + _ns2Indx[i] = m; + if ((--k) == 0) + { + k = ++step; + m++; + } + } + for (int j = 0; j < 0x40; j++) + { + _hb2Flag[j] = 0; + } + for (int j = 0; j < 0x100 - 0x40; j++) + { + _hb2Flag[0x40 + j] = 0x08; + } + _dummySee2Cont.Shift = PERIOD_BITS; + } + + private void ClearMask() + { + _escCount = 1; + Utility.Fill(_charMask, 0); + } + + internal bool DecodeInit(IRarUnpack unpackRead, int escChar) + { + int maxOrder = unpackRead.Char & 0xff; + bool reset = ((maxOrder & 0x20) != 0); + + int maxMb = 0; + if (reset) + { + maxMb = unpackRead.Char; + } + else + { + if (SubAlloc.GetAllocatedMemory() == 0) + { + return (false); + } + } + if ((maxOrder & 0x40) != 0) + { + escChar = unpackRead.Char; + unpackRead.PpmEscChar = escChar; + } + Coder = new RangeCoder(unpackRead); + if (reset) + { + maxOrder = (maxOrder & 0x1f) + 1; + if (maxOrder > 16) + { + maxOrder = 16 + (maxOrder - 16) * 3; + } + if (maxOrder == 1) + { + SubAlloc.StopSubAllocator(); + return (false); + } + SubAlloc.StartSubAllocator((maxMb + 1) << 20); + _minContext = new PpmContext(Heap); + + //medContext = new PPMContext(Heap); + _maxContext = new PpmContext(Heap); + FoundState = new State(Heap); + _dummySee2Cont = new See2Context(); + for (int i = 0; i < 25; i++) + { + for (int j = 0; j < 16; j++) + { + _see2Cont[i][j] = new See2Context(); + } + } + StartModelRare(maxOrder); + } + return (_minContext.Address != 0); + } + + public virtual int DecodeChar() + { + // Debug + //subAlloc.dumpHeap(); + + if (_minContext.Address <= SubAlloc.PText || _minContext.Address > SubAlloc.HeapEnd) + { + return (-1); + } + + if (_minContext.NumStats != 1) + { + if (_minContext.FreqData.GetStats() <= SubAlloc.PText 
|| + _minContext.FreqData.GetStats() > SubAlloc.HeapEnd) + { + return (-1); + } + if (!_minContext.DecodeSymbol1(this)) + { + return (-1); + } + } + else + { + _minContext.DecodeBinSymbol(this); + } + Coder.Decode(); + while (FoundState.Address == 0) + { + Coder.AriDecNormalize(); + do + { + _orderFall++; + _minContext.Address = _minContext.GetSuffix(); // =MinContext->Suffix; + if (_minContext.Address <= SubAlloc.PText || _minContext.Address > SubAlloc.HeapEnd) + { + return (-1); + } + } + while (_minContext.NumStats == _numMasked); + if (!_minContext.DecodeSymbol2(this)) + { + return (-1); + } + Coder.Decode(); + } + int symbol = FoundState.Symbol; + if ((_orderFall == 0) && FoundState.GetSuccessor() > SubAlloc.PText) + { + // MinContext=MaxContext=FoundState->Successor; + int addr = FoundState.GetSuccessor(); + _minContext.Address = addr; + _maxContext.Address = addr; + } + else + { + UpdateModel(); + + //this.foundState.Address=foundState.Address);//TODO just 4 debugging + if (_escCount == 0) + { + ClearMask(); + } + } + Coder.AriDecNormalize(); // ARI_DEC_NORMALIZE(Coder.code,Coder.low,Coder.range,Coder.UnpackRead); + return (symbol); + } + + public virtual See2Context[][] GetSee2Cont() + { + return _see2Cont; + } + + public virtual void IncEscCount(int dEscCount) + { + EscCount = EscCount + dEscCount; + } + + public virtual void IncRunLength(int dRunLength) + { + RunLength = RunLength + dRunLength; + } + + public virtual int[] GetHb2Flag() + { + return _hb2Flag; + } + + public virtual int[] GetNs2BsIndx() + { + return _ns2BsIndx; + } + + public virtual int[] GetNs2Indx() + { + return _ns2Indx; + } + + private int CreateSuccessors(bool skip, State p1) + { + //State upState = tempState1.Initialize(null); + StateRef upState = _tempStateRef2; + State tempState = _tempState1.Initialize(Heap); + + // PPM_CONTEXT* pc=MinContext, * UpBranch=FoundState->Successor; + PpmContext pc = _tempPpmContext1.Initialize(Heap); + pc.Address = _minContext.Address; + PpmContext 
upBranch = _tempPpmContext2.Initialize(Heap); + upBranch.Address = FoundState.GetSuccessor(); + + // STATE * p, * ps[MAX_O], ** pps=ps; + State p = _tempState2.Initialize(Heap); + int pps = 0; + + bool noLoop = false; + + if (!skip) + { + _ps[pps++] = FoundState.Address; // *pps++ = FoundState; + if (pc.GetSuffix() == 0) + { + noLoop = true; + } + } + if (!noLoop) + { + bool loopEntry = false; + if (p1.Address != 0) + { + p.Address = p1.Address; + pc.Address = pc.GetSuffix(); // =pc->Suffix; + loopEntry = true; + } + do + { + if (!loopEntry) + { + pc.Address = pc.GetSuffix(); // pc=pc->Suffix; + if (pc.NumStats != 1) + { + p.Address = pc.FreqData.GetStats(); // p=pc->U.Stats + if (p.Symbol != FoundState.Symbol) + { + do + { + p.IncrementAddress(); + } + while (p.Symbol != FoundState.Symbol); + } + } + else + { + p.Address = pc.GetOneState().Address; // p=&(pc->OneState); + } + } // LOOP_ENTRY: + loopEntry = false; + if (p.GetSuccessor() != upBranch.Address) + { + pc.Address = p.GetSuccessor(); // =p->Successor; + break; + } + _ps[pps++] = p.Address; + } + while (pc.GetSuffix() != 0); + } // NO_LOOP: + if (pps == 0) + { + return pc.Address; + } + upState.Symbol = Heap[upBranch.Address]; // UpState.Symbol=*(byte*) + + // UpBranch; + // UpState.Successor=(PPM_CONTEXT*) (((byte*) UpBranch)+1); + upState.SetSuccessor(upBranch.Address + 1); //TODO check if +1 necessary + if (pc.NumStats != 1) + { + if (pc.Address <= SubAlloc.PText) + { + return (0); + } + p.Address = pc.FreqData.GetStats(); + if (p.Symbol != upState.Symbol) + { + do + { + p.IncrementAddress(); + } + while (p.Symbol != upState.Symbol); + } + int cf = p.Freq - 1; + int s0 = pc.FreqData.SummFreq - pc.NumStats - cf; + + // UpState.Freq=1+((2*cf <= s0)?(5*cf > s0):((2*cf+3*s0-1)/(2*s0))); + upState.Freq = 1 + ((2 * cf <= s0) ? (5 * cf > s0 ? 
1 : 0) : ((2 * cf + 3 * s0 - 1) / (2 * s0))); + } + else + { + upState.Freq = pc.GetOneState().Freq; // UpState.Freq=pc->OneState.Freq; + } + do + { + // pc = pc->createChild(this,*--pps,UpState); + tempState.Address = _ps[--pps]; + pc.Address = pc.CreateChild(this, tempState, upState); + if (pc.Address == 0) + { + return 0; + } + } + while (pps != 0); + return pc.Address; + } + + private void UpdateModelRestart() + { + RestartModelRare(); + _escCount = 0; + } + + private void UpdateModel() + { + //System.out.println("ModelPPM.updateModel()"); + // STATE fs = *FoundState, *p = NULL; + StateRef fs = _tempStateRef1; + fs.Values = FoundState; + State p = _tempState3.Initialize(Heap); + State tempState = _tempState4.Initialize(Heap); + + PpmContext pc = _tempPpmContext3.Initialize(Heap); + PpmContext successor = _tempPpmContext4.Initialize(Heap); + + int ns1, ns, cf, sf, s0; + pc.Address = _minContext.GetSuffix(); + if (fs.Freq < MAX_FREQ / 4 && pc.Address != 0) + { + if (pc.NumStats != 1) + { + p.Address = pc.FreqData.GetStats(); + if (p.Symbol != fs.Symbol) + { + do + { + p.IncrementAddress(); + } + while (p.Symbol != fs.Symbol); + tempState.Address = p.Address - State.SIZE; + if (p.Freq >= tempState.Freq) + { + State.PpmdSwap(p, tempState); + p.DecrementAddress(); + } + } + if (p.Freq < MAX_FREQ - 9) + { + p.IncrementFreq(2); + pc.FreqData.IncrementSummFreq(2); + } + } + else + { + p.Address = pc.GetOneState().Address; + if (p.Freq < 32) + { + p.IncrementFreq(1); + } + } + } + if (_orderFall == 0) + { + FoundState.SetSuccessor(CreateSuccessors(true, p)); + _minContext.Address = FoundState.GetSuccessor(); + _maxContext.Address = FoundState.GetSuccessor(); + if (_minContext.Address == 0) + { + UpdateModelRestart(); + return; + } + return; + } + SubAlloc.Heap[SubAlloc.PText] = (byte)fs.Symbol; + SubAlloc.IncPText(); + successor.Address = SubAlloc.PText; + if (SubAlloc.PText >= SubAlloc.FakeUnitsStart) + { + UpdateModelRestart(); + return; + } + + // // Debug + // 
subAlloc.dumpHeap(); + if (fs.GetSuccessor() != 0) + { + if (fs.GetSuccessor() <= SubAlloc.PText) + { + fs.SetSuccessor(CreateSuccessors(false, p)); + if (fs.GetSuccessor() == 0) + { + UpdateModelRestart(); + return; + } + } + if (--_orderFall == 0) + { + successor.Address = fs.GetSuccessor(); + if (_maxContext.Address != _minContext.Address) + { + SubAlloc.DecPText(1); + } + } + } + else + { + FoundState.SetSuccessor(successor.Address); + fs.SetSuccessor(_minContext); + } + + // // Debug + // subAlloc.dumpHeap(); + ns = _minContext.NumStats; + s0 = _minContext.FreqData.SummFreq - (ns) - (fs.Freq - 1); + for (pc.Address = _maxContext.Address; pc.Address != _minContext.Address; pc.Address = pc.GetSuffix()) + { + if ((ns1 = pc.NumStats) != 1) + { + if ((ns1 & 1) == 0) + { + //System.out.println(ns1); + pc.FreqData.SetStats(SubAlloc.ExpandUnits(pc.FreqData.GetStats(), Utility.URShift(ns1, 1))); + if (pc.FreqData.GetStats() == 0) + { + UpdateModelRestart(); + return; + } + } + + // bug fixed + // int sum = ((2 * ns1 < ns) ? 1 : 0) + + // 2 * ((4 * ((ns1 <= ns) ? 1 : 0)) & ((pc.getFreqData() + // .getSummFreq() <= 8 * ns1) ? 1 : 0)); + int sum = ((2 * ns1 < ns) ? 1 : 0) + + 2 * (((4 * ns1 <= ns) ? 1 : 0) & ((pc.FreqData.SummFreq <= 8 * ns1) ? 1 : 0)); + pc.FreqData.IncrementSummFreq(sum); + } + else + { + p.Address = SubAlloc.AllocUnits(1); + if (p.Address == 0) + { + UpdateModelRestart(); + return; + } + p.SetValues(pc.GetOneState()); + pc.FreqData.SetStats(p); + if (p.Freq < MAX_FREQ / 4 - 1) + { + p.IncrementFreq(p.Freq); + } + else + { + p.Freq = MAX_FREQ - 4; + } + pc.FreqData.SummFreq = (p.Freq + _initEsc + (ns > 3 ? 1 : 0)); + } + cf = 2 * fs.Freq * (pc.FreqData.SummFreq + 6); + sf = s0 + pc.FreqData.SummFreq; + if (cf < 6 * sf) + { + cf = 1 + (cf > sf ? 1 : 0) + (cf >= 4 * sf ? 1 : 0); + pc.FreqData.IncrementSummFreq(3); + } + else + { + cf = 4 + (cf >= 9 * sf ? 1 : 0) + (cf >= 12 * sf ? 1 : 0) + (cf >= 15 * sf ? 
1 : 0); + pc.FreqData.IncrementSummFreq(cf); + } + p.Address = pc.FreqData.GetStats() + ns1 * State.SIZE; + p.SetSuccessor(successor); + p.Symbol = fs.Symbol; + p.Freq = cf; + pc.NumStats = ++ns1; + } + + int address = fs.GetSuccessor(); + _maxContext.Address = address; + _minContext.Address = address; + + //TODO-----debug + // int pos = minContext.getFreqData().getStats(); + // State a = new State(getHeap()); + // a.Address=pos); + // pos+=State.size; + // a.Address=pos); + //--dbg end + } + + // Debug + public override String ToString() + { + StringBuilder buffer = new StringBuilder(); + buffer.Append("ModelPPM["); + buffer.Append("\n numMasked="); + buffer.Append(_numMasked); + buffer.Append("\n initEsc="); + buffer.Append(_initEsc); + buffer.Append("\n orderFall="); + buffer.Append(_orderFall); + buffer.Append("\n maxOrder="); + buffer.Append(_maxOrder); + buffer.Append("\n runLength="); + buffer.Append(_runLength); + buffer.Append("\n initRL="); + buffer.Append(_initRl); + buffer.Append("\n escCount="); + buffer.Append(_escCount); + buffer.Append("\n prevSuccess="); + buffer.Append(_prevSuccess); + buffer.Append("\n foundState="); + buffer.Append(FoundState); + buffer.Append("\n coder="); + buffer.Append(Coder); + buffer.Append("\n subAlloc="); + buffer.Append(SubAlloc); + buffer.Append("\n]"); + return buffer.ToString(); + } + + // Debug + // public void dumpHeap() { + // subAlloc.dumpHeap(); + // } + + internal bool DecodeInit(Stream stream, int maxOrder, int maxMemory) + { + if (stream != null) + { + Coder = new RangeCoder(stream); + } + + if (maxOrder == 1) + { + SubAlloc.StopSubAllocator(); + return (false); + } + SubAlloc.StartSubAllocator(maxMemory); + _minContext = new PpmContext(Heap); + + //medContext = new PPMContext(Heap); + _maxContext = new PpmContext(Heap); + FoundState = new State(Heap); + _dummySee2Cont = new See2Context(); + for (int i = 0; i < 25; i++) + { + for (int j = 0; j < 16; j++) + { + _see2Cont[i][j] = new See2Context(); + } + } + 
StartModelRare(maxOrder); + + return (_minContext.Address != 0); + } + + internal void NextContext() + { + int addr = FoundState.GetSuccessor(); + if (_orderFall == 0 && addr > SubAlloc.PText) + { + _minContext.Address = addr; + _maxContext.Address = addr; + } + else + { + UpdateModel(); + } + } + + public int DecodeChar(Decoder decoder) + { + if (_minContext.NumStats != 1) + { + State s = _tempState1.Initialize(Heap); + s.Address = _minContext.FreqData.GetStats(); + int i; + int count, hiCnt; + if ((count = (int)decoder.GetThreshold((uint)_minContext.FreqData.SummFreq)) < (hiCnt = s.Freq)) + { + byte symbol; + decoder.Decode(0, (uint)s.Freq); + symbol = (byte)s.Symbol; + _minContext.update1_0(this, s.Address); + NextContext(); + return symbol; + } + _prevSuccess = 0; + i = _minContext.NumStats - 1; + do + { + s.IncrementAddress(); + if ((hiCnt += s.Freq) > count) + { + byte symbol; + decoder.Decode((uint)(hiCnt - s.Freq), (uint)s.Freq); + symbol = (byte)s.Symbol; + _minContext.Update1(this, s.Address); + NextContext(); + return symbol; + } + } + while (--i > 0); + if (count >= _minContext.FreqData.SummFreq) + { + return -2; + } + _hiBitsFlag = _hb2Flag[FoundState.Symbol]; + decoder.Decode((uint)hiCnt, (uint)(_minContext.FreqData.SummFreq - hiCnt)); + for (i = 0; i < 256; i++) + { + _charMask[i] = -1; + } + _charMask[s.Symbol] = 0; + i = _minContext.NumStats - 1; + do + { + s.DecrementAddress(); + _charMask[s.Symbol] = 0; + } + while (--i > 0); + } + else + { + State rs = _tempState1.Initialize(Heap); + rs.Address = _minContext.GetOneState().Address; + _hiBitsFlag = GetHb2Flag()[FoundState.Symbol]; + int off1 = rs.Freq - 1; + int off2 = _minContext.GetArrayIndex(this, rs); + int bs = _binSumm[off1][off2]; + if (decoder.DecodeBit((uint)bs, 14) == 0) + { + byte symbol; + _binSumm[off1][off2] = (bs + INTERVAL - _minContext.GetMean(bs, PERIOD_BITS, 2)) & 0xFFFF; + FoundState.Address = rs.Address; + symbol = (byte)rs.Symbol; + rs.IncrementFreq((rs.Freq < 128) ? 
1 : 0); + _prevSuccess = 1; + IncRunLength(1); + NextContext(); + return symbol; + } + bs = (bs - _minContext.GetMean(bs, PERIOD_BITS, 2)) & 0xFFFF; + _binSumm[off1][off2] = bs; + _initEsc = PpmContext.EXP_ESCAPE[Utility.URShift(bs, 10)]; + int i; + for (i = 0; i < 256; i++) + { + _charMask[i] = -1; + } + _charMask[rs.Symbol] = 0; + _prevSuccess = 0; + } + for (;;) + { + State s = _tempState1.Initialize(Heap); + int i; + int freqSum, count, hiCnt; + See2Context see; + int num, numMasked = _minContext.NumStats; + do + { + _orderFall++; + _minContext.Address = _minContext.GetSuffix(); + if (_minContext.Address <= SubAlloc.PText || _minContext.Address > SubAlloc.HeapEnd) + { + return -1; + } + } + while (_minContext.NumStats == numMasked); + hiCnt = 0; + s.Address = _minContext.FreqData.GetStats(); + i = 0; + num = _minContext.NumStats - numMasked; + do + { + int k = _charMask[s.Symbol]; + hiCnt += s.Freq & k; + _minContext._ps[i] = s.Address; + s.IncrementAddress(); + i -= k; + } + while (i != num); + + see = _minContext.MakeEscFreq(this, numMasked, out freqSum); + freqSum += hiCnt; + count = (int)decoder.GetThreshold((uint)freqSum); + + if (count < hiCnt) + { + byte symbol; + State ps = _tempState2.Initialize(Heap); + for (hiCnt = 0, i = 0, ps.Address = _minContext._ps[i]; + (hiCnt += ps.Freq) <= count; + i++, ps.Address = _minContext._ps[i]) + { + ; + } + s.Address = ps.Address; + decoder.Decode((uint)(hiCnt - s.Freq), (uint)s.Freq); + see.Update(); + symbol = (byte)s.Symbol; + _minContext.Update2(this, s.Address); + UpdateModel(); + return symbol; + } + if (count >= freqSum) + { + return -2; + } + decoder.Decode((uint)hiCnt, (uint)(freqSum - hiCnt)); + see.Summ = see.Summ + freqSum; + do + { + s.Address = _minContext._ps[--i]; + _charMask[s.Symbol] = 0; + } + while (i != 0); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/PPMContext.cs 
b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/PPMContext.cs new file mode 100644 index 0000000000..7be3100371 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/PPMContext.cs @@ -0,0 +1,567 @@ +using System; +using System.Text; +using SharpCompress.Converters; + +namespace SharpCompress.Compressors.PPMd.H +{ + internal class PpmContext : Pointer + { + internal FreqData FreqData + { + get => _freqData; + set + { + _freqData.SummFreq = value.SummFreq; + _freqData.SetStats(value.GetStats()); + } + } + + public virtual int NumStats + { + get + { + if (Memory != null) + { + _numStats = DataConverter.LittleEndian.GetInt16(Memory, Address) & 0xffff; + } + return _numStats; + } + + set + { + _numStats = value & 0xffff; + if (Memory != null) + { + DataConverter.LittleEndian.PutBytes(Memory, Address, (short)value); + } + } + } + + //UPGRADE_NOTE: Final was removed from the declaration of 'unionSize '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + //UPGRADE_NOTE: The initialization of 'unionSize' was moved to static method 'SharpCompress.Unpack.PPM.PPMContext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1005'" + private static readonly int UNION_SIZE; + + //UPGRADE_NOTE: Final was removed from the declaration of 'size '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + public static readonly int SIZE = 2 + UNION_SIZE + 4; // 12 + + // ushort NumStats; + private int _numStats; // determines if feqData or onstate is used + + // (1==onestate) + + //UPGRADE_NOTE: Final was removed from the declaration of 'freqData '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly FreqData _freqData; // -\ + + // |-> union + //UPGRADE_NOTE: Final was removed from the declaration of 'oneState '. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _oneState; // -/ + + private int _suffix; // pointer ppmcontext + + //UPGRADE_NOTE: Final was removed from the declaration of 'ExpEscape'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + public static readonly int[] EXP_ESCAPE = {25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2}; + + // Temp fields + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState1 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState1 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState2 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState2 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState3 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState3 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState4 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState4 = new State(null); + + //UPGRADE_NOTE: Final was removed from the declaration of 'tempState5 '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + private readonly State _tempState5 = new State(null); + private PpmContext _tempPpmContext; + + //UPGRADE_NOTE: Final was removed from the declaration of 'ps '. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + internal int[] _ps = new int[256]; + + public PpmContext(byte[] memory) + : base(memory) + { + _oneState = new State(memory); + _freqData = new FreqData(memory); + } + + internal PpmContext Initialize(byte[] mem) + { + _oneState.Initialize(mem); + _freqData.Initialize(mem); + return base.Initialize(mem); + } + + internal State GetOneState() + { + return _oneState; + } + + internal void SetOneState(StateRef oneState) + { + _oneState.SetValues(oneState); + } + + internal int GetSuffix() + { + if (Memory != null) + { + _suffix = DataConverter.LittleEndian.GetInt32(Memory, Address + 8); + } + return _suffix; + } + + internal void SetSuffix(PpmContext suffix) + { + SetSuffix(suffix.Address); + } + + internal void SetSuffix(int suffix) + { + _suffix = suffix; + if (Memory != null) + { + DataConverter.LittleEndian.PutBytes(Memory, Address + 8, suffix); + } + } + + internal override int Address + { + get => base.Address; + set + { + base.Address = value; + _oneState.Address = value + 2; + _freqData.Address = value + 2; + } + } + + private PpmContext GetTempPpmContext(byte[] memory) + { + if (_tempPpmContext == null) + { + _tempPpmContext = new PpmContext(null); + } + return _tempPpmContext.Initialize(memory); + } + + internal int CreateChild(ModelPpm model, State pStats, StateRef firstState) + { + PpmContext pc = GetTempPpmContext(model.SubAlloc.Heap); + pc.Address = model.SubAlloc.AllocContext(); + if (pc != null) + { + pc.NumStats = 1; + pc.SetOneState(firstState); + pc.SetSuffix(this); + pStats.SetSuccessor(pc); + } + return pc.Address; + } + + internal void Rescale(ModelPpm model) + { + int oldNs = NumStats, i = NumStats - 1, adder, escFreq; + + // STATE* p1, * p; + State p1 = new State(model.Heap); + State p = new State(model.Heap); + State temp = new State(model.Heap); + + for (p.Address = model.FoundState.Address; p.Address != _freqData.GetStats(); 
p.DecrementAddress()) + { + temp.Address = p.Address - State.SIZE; + State.PpmdSwap(p, temp); + } + temp.Address = _freqData.GetStats(); + temp.IncrementFreq(4); + _freqData.IncrementSummFreq(4); + escFreq = _freqData.SummFreq - p.Freq; + adder = (model.OrderFall != 0) ? 1 : 0; + p.Freq = Utility.URShift((p.Freq + adder), 1); + _freqData.SummFreq = p.Freq; + do + { + p.IncrementAddress(); + escFreq -= p.Freq; + p.Freq = Utility.URShift((p.Freq + adder), 1); + _freqData.IncrementSummFreq(p.Freq); + temp.Address = p.Address - State.SIZE; + if (p.Freq > temp.Freq) + { + p1.Address = p.Address; + StateRef tmp = new StateRef(); + tmp.Values = p1; + State temp2 = new State(model.Heap); + State temp3 = new State(model.Heap); + do + { + // p1[0]=p1[-1]; + temp2.Address = p1.Address - State.SIZE; + p1.SetValues(temp2); + p1.DecrementAddress(); + temp3.Address = p1.Address - State.SIZE; + } + while (p1.Address != _freqData.GetStats() && tmp.Freq > temp3.Freq); + p1.SetValues(tmp); + } + } + while (--i != 0); + if (p.Freq == 0) + { + do + { + i++; + p.DecrementAddress(); + } + while (p.Freq == 0); + escFreq += i; + NumStats = NumStats - i; + if (NumStats == 1) + { + StateRef tmp = new StateRef(); + temp.Address = _freqData.GetStats(); + tmp.Values = temp; + + // STATE tmp=*U.Stats; + do + { + // tmp.Freq-=(tmp.Freq >> 1) + tmp.DecrementFreq(Utility.URShift(tmp.Freq, 1)); + escFreq = Utility.URShift(escFreq, 1); + } + while (escFreq > 1); + model.SubAlloc.FreeUnits(_freqData.GetStats(), Utility.URShift((oldNs + 1), 1)); + _oneState.SetValues(tmp); + model.FoundState.Address = _oneState.Address; + return; + } + } + escFreq -= Utility.URShift(escFreq, 1); + _freqData.IncrementSummFreq(escFreq); + int n0 = Utility.URShift((oldNs + 1), 1), n1 = Utility.URShift((NumStats + 1), 1); + if (n0 != n1) + { + _freqData.SetStats(model.SubAlloc.ShrinkUnits(_freqData.GetStats(), n0, n1)); + } + model.FoundState.Address = _freqData.GetStats(); + } + + internal int GetArrayIndex(ModelPpm 
model, State rs) + { + PpmContext tempSuffix = GetTempPpmContext(model.SubAlloc.Heap); + tempSuffix.Address = GetSuffix(); + int ret = 0; + ret += model.PrevSuccess; + ret += model.GetNs2BsIndx()[tempSuffix.NumStats - 1]; + ret += model.HiBitsFlag + 2 * model.GetHb2Flag()[rs.Symbol]; + ret += ((Utility.URShift(model.RunLength, 26)) & 0x20); + return ret; + } + + internal int GetMean(int summ, int shift, int round) + { + return (Utility.URShift((summ + (1 << (shift - round))), (shift))); + } + + internal void DecodeBinSymbol(ModelPpm model) + { + State rs = _tempState1.Initialize(model.Heap); + rs.Address = _oneState.Address; // State& + model.HiBitsFlag = model.GetHb2Flag()[model.FoundState.Symbol]; + int off1 = rs.Freq - 1; + int off2 = GetArrayIndex(model, rs); + int bs = model.BinSumm[off1][off2]; + if (model.Coder.GetCurrentShiftCount(ModelPpm.TOT_BITS) < bs) + { + model.FoundState.Address = rs.Address; + rs.IncrementFreq((rs.Freq < 128) ? 1 : 0); + model.Coder.SubRange.LowCount = 0; + model.Coder.SubRange.HighCount = bs; + bs = ((bs + ModelPpm.INTERVAL - GetMean(bs, ModelPpm.PERIOD_BITS, 2)) & 0xffff); + model.BinSumm[off1][off2] = bs; + model.PrevSuccess = 1; + model.IncRunLength(1); + } + else + { + model.Coder.SubRange.LowCount = bs; + bs = (bs - GetMean(bs, ModelPpm.PERIOD_BITS, 2)) & 0xFFFF; + model.BinSumm[off1][off2] = bs; + model.Coder.SubRange.HighCount = ModelPpm.BIN_SCALE; + model.InitEsc = EXP_ESCAPE[Utility.URShift(bs, 10)]; + model.NumMasked = 1; + model.CharMask[rs.Symbol] = model.EscCount; + model.PrevSuccess = 0; + model.FoundState.Address = 0; + } + + //int a = 0;//TODO just 4 debugging + } + + // public static void ppmdSwap(ModelPPM model, StatePtr state1, StatePtr state2) + // { + // byte[] bytes = model.getSubAlloc().getHeap(); + // int p1 = state1.Address; + // int p2 = state2.Address; + // + // for (int i = 0; i < StatePtr.size; i++) { + // byte temp = bytes[p1+i]; + // bytes[p1+i] = bytes[p2+i]; + // bytes[p2+i] = temp; + // } + // 
state1.Address=p1); + // state2.Address=p2); + // } + + internal void Update1(ModelPpm model, int p) + { + model.FoundState.Address = p; + model.FoundState.IncrementFreq(4); + _freqData.IncrementSummFreq(4); + State p0 = _tempState3.Initialize(model.Heap); + State p1 = _tempState4.Initialize(model.Heap); + p0.Address = p; + p1.Address = p - State.SIZE; + if (p0.Freq > p1.Freq) + { + State.PpmdSwap(p0, p1); + model.FoundState.Address = p1.Address; + if (p1.Freq > ModelPpm.MAX_FREQ) + { + Rescale(model); + } + } + } + + internal void update1_0(ModelPpm model, int p) + { + model.FoundState.Address = p; + model.PrevSuccess = 2 * model.FoundState.Freq > _freqData.SummFreq ? 1 : 0; + model.IncRunLength(model.PrevSuccess); + _freqData.IncrementSummFreq(4); + model.FoundState.IncrementFreq(4); + if (model.FoundState.Freq > ModelPpm.MAX_FREQ) + { + Rescale(model); + } + } + + internal bool DecodeSymbol2(ModelPpm model) + { + long count; + int hiCnt, i = NumStats - model.NumMasked; + See2Context psee2C = MakeEscFreq2(model, i); + RangeCoder coder = model.Coder; + + // STATE* ps[256], ** pps=ps, * p=U.Stats-1; + State p = _tempState1.Initialize(model.Heap); + State temp = _tempState2.Initialize(model.Heap); + p.Address = _freqData.GetStats() - State.SIZE; + int pps = 0; + hiCnt = 0; + + do + { + do + { + p.IncrementAddress(); // p++; + } + while (model.CharMask[p.Symbol] == model.EscCount); + hiCnt += p.Freq; + _ps[pps++] = p.Address; + } + while (--i != 0); + coder.SubRange.IncScale(hiCnt); + count = coder.CurrentCount; + if (count >= coder.SubRange.Scale) + { + return false; + } + pps = 0; + p.Address = _ps[pps]; + if (count < hiCnt) + { + hiCnt = 0; + while ((hiCnt += p.Freq) <= count) + { + p.Address = _ps[++pps]; // p=*++pps; + } + coder.SubRange.HighCount = hiCnt; + coder.SubRange.LowCount = hiCnt - p.Freq; + psee2C.Update(); + Update2(model, p.Address); + } + else + { + coder.SubRange.LowCount = hiCnt; + coder.SubRange.HighCount = coder.SubRange.Scale; + i = NumStats - 
model.NumMasked; // ->NumMasked; + pps--; + do + { + temp.Address = _ps[++pps]; // (*++pps) + model.CharMask[temp.Symbol] = model.EscCount; + } + while (--i != 0); + psee2C.IncSumm((int)coder.SubRange.Scale); + model.NumMasked = NumStats; + } + return (true); + } + + internal void Update2(ModelPpm model, int p) + { + State temp = _tempState5.Initialize(model.Heap); + temp.Address = p; + model.FoundState.Address = p; + model.FoundState.IncrementFreq(4); + _freqData.IncrementSummFreq(4); + if (temp.Freq > ModelPpm.MAX_FREQ) + { + Rescale(model); + } + model.IncEscCount(1); + model.RunLength = model.InitRl; + } + + private See2Context MakeEscFreq2(ModelPpm model, int diff) + { + See2Context psee2C; + int numStats = NumStats; + if (numStats != 256) + { + PpmContext suff = GetTempPpmContext(model.Heap); + suff.Address = GetSuffix(); + int idx1 = model.GetNs2Indx()[diff - 1]; + int idx2 = 0; + idx2 += ((diff < suff.NumStats - numStats) ? 1 : 0); + idx2 += 2 * ((_freqData.SummFreq < 11 * numStats) ? 1 : 0); + idx2 += 4 * ((model.NumMasked > diff) ? 1 : 0); + idx2 += model.HiBitsFlag; + psee2C = model.GetSee2Cont()[idx1][idx2]; + model.Coder.SubRange.Scale = psee2C.Mean; + } + else + { + psee2C = model.DummySee2Cont; + model.Coder.SubRange.Scale = 1; + } + return psee2C; + } + + internal See2Context MakeEscFreq(ModelPpm model, int numMasked, out int escFreq) + { + See2Context psee2C; + int numStats = NumStats; + int nonMasked = numStats - numMasked; + if (numStats != 256) + { + PpmContext suff = GetTempPpmContext(model.Heap); + suff.Address = GetSuffix(); + int idx1 = model.GetNs2Indx()[nonMasked - 1]; + int idx2 = 0; + idx2 += ((nonMasked < suff.NumStats - numStats) ? 1 : 0); + idx2 += 2 * ((_freqData.SummFreq < 11 * numStats) ? 1 : 0); + idx2 += 4 * ((numMasked > nonMasked) ? 
1 : 0); + idx2 += model.HiBitsFlag; + psee2C = model.GetSee2Cont()[idx1][idx2]; + escFreq = psee2C.Mean; + } + else + { + psee2C = model.DummySee2Cont; + escFreq = 1; + } + return psee2C; + } + + internal bool DecodeSymbol1(ModelPpm model) + { + RangeCoder coder = model.Coder; + coder.SubRange.Scale = _freqData.SummFreq; + State p = new State(model.Heap); + p.Address = _freqData.GetStats(); + int i, hiCnt; + long count = coder.CurrentCount; + if (count >= coder.SubRange.Scale) + { + return false; + } + if (count < (hiCnt = p.Freq)) + { + coder.SubRange.HighCount = hiCnt; + model.PrevSuccess = (2 * hiCnt > coder.SubRange.Scale) ? 1 : 0; + model.IncRunLength(model.PrevSuccess); + hiCnt += 4; + model.FoundState.Address = p.Address; + model.FoundState.Freq = hiCnt; + _freqData.IncrementSummFreq(4); + if (hiCnt > ModelPpm.MAX_FREQ) + { + Rescale(model); + } + coder.SubRange.LowCount = 0; + return true; + } + if (model.FoundState.Address == 0) + { + return (false); + } + model.PrevSuccess = 0; + int numStats = NumStats; + i = numStats - 1; + while ((hiCnt += p.IncrementAddress().Freq) <= count) + { + if (--i == 0) + { + model.HiBitsFlag = model.GetHb2Flag()[model.FoundState.Symbol]; + coder.SubRange.LowCount = hiCnt; + model.CharMask[p.Symbol] = model.EscCount; + model.NumMasked = numStats; + i = numStats - 1; + model.FoundState.Address = 0; + do + { + model.CharMask[p.DecrementAddress().Symbol] = model.EscCount; + } + while (--i != 0); + coder.SubRange.HighCount = coder.SubRange.Scale; + return (true); + } + } + coder.SubRange.LowCount = hiCnt - p.Freq; + coder.SubRange.HighCount = hiCnt; + Update1(model, p.Address); + return (true); + } + + public override String ToString() + { + StringBuilder buffer = new StringBuilder(); + buffer.Append("PPMContext["); + buffer.Append("\n Address="); + buffer.Append(Address); + buffer.Append("\n size="); + buffer.Append(SIZE); + buffer.Append("\n numStats="); + buffer.Append(NumStats); + buffer.Append("\n Suffix="); + 
buffer.Append(GetSuffix()); + buffer.Append("\n freqData="); + buffer.Append(_freqData); + buffer.Append("\n oneState="); + buffer.Append(_oneState); + buffer.Append("\n]"); + return buffer.ToString(); + } + + static PpmContext() + { + UNION_SIZE = Math.Max(FreqData.SIZE, State.SIZE); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/Pointer.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/Pointer.cs new file mode 100644 index 0000000000..c0ae8d8cee --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/Pointer.cs @@ -0,0 +1,25 @@ +namespace SharpCompress.Compressors.PPMd.H +{ + internal abstract class Pointer + { + /// Initialize the object with the array (may be null) + /// the byte array + /// + internal Pointer(byte[] mem) + { + Memory = mem; + } + + internal byte[] Memory { get; private set; } + + internal virtual int Address { get; set; } + + protected T Initialize(byte[] mem) + where T : Pointer + { + Memory = mem; + Address = 0; + return this as T; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RangeCoder.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RangeCoder.cs new file mode 100644 index 0000000000..e194685233 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RangeCoder.cs @@ -0,0 +1,155 @@ +using System; +using System.IO; +using System.Text; +using SharpCompress.Compressors.Rar; + +namespace SharpCompress.Compressors.PPMd.H +{ + internal class RangeCoder + { + internal const int TOP = 1 << 24; + internal const int BOT = 1 << 15; + internal const long UINT_MASK = 0xFFFFffffL; + + // uint low, code, range; + private long _low, _code, _range; + private readonly IRarUnpack _unpackRead; + private readonly Stream _stream; + + internal RangeCoder(IRarUnpack unpackRead) + { + _unpackRead = unpackRead; + Init(); + } + + internal RangeCoder(Stream stream) + { + _stream = stream; 
+ Init(); + } + + private void Init() + { + SubRange = new SubRange(); + + _low = _code = 0L; + _range = 0xFFFFffffL; + for (int i = 0; i < 4; i++) + { + _code = ((_code << 8) | Char) & UINT_MASK; + } + } + + internal int CurrentCount + { + get + { + _range = (_range / SubRange.Scale) & UINT_MASK; + return (int)((_code - _low) / (_range)); + } + } + + private long Char + { + get + { + if (_unpackRead != null) + { + return (_unpackRead.Char); + } + if (_stream != null) + { + return _stream.ReadByte(); + } + return -1; + } + } + + internal SubRange SubRange { get; private set; } + + internal long GetCurrentShiftCount(int shift) + { + _range = Utility.URShift(_range, shift); + return ((_code - _low) / (_range)) & UINT_MASK; + } + + internal void Decode() + { + _low = (_low + (_range * SubRange.LowCount)) & UINT_MASK; + _range = (_range * (SubRange.HighCount - SubRange.LowCount)) & UINT_MASK; + } + + internal void AriDecNormalize() + { + // while ((low ^ (low + range)) < TOP || range < BOT && ((range = -low & (BOT - 1)) != 0 ? 
true : true)) + // { + // code = ((code << 8) | unpackRead.getChar()&0xff)&uintMask; + // range = (range << 8)&uintMask; + // low = (low << 8)&uintMask; + // } + + // Rewrote for clarity + bool c2 = false; + while ((_low ^ (_low + _range)) < TOP || (c2 = _range < BOT)) + { + if (c2) + { + _range = (-_low & (BOT - 1)) & UINT_MASK; + c2 = false; + } + _code = ((_code << 8) | Char) & UINT_MASK; + _range = (_range << 8) & UINT_MASK; + _low = (_low << 8) & UINT_MASK; + } + } + + // Debug + public override String ToString() + { + StringBuilder buffer = new StringBuilder(); + buffer.Append("RangeCoder["); + buffer.Append("\n low="); + buffer.Append(_low); + buffer.Append("\n code="); + buffer.Append(_code); + buffer.Append("\n range="); + buffer.Append(_range); + buffer.Append("\n subrange="); + buffer.Append(SubRange); + buffer.Append("]"); + return buffer.ToString(); + } + } + + internal class SubRange + { + // uint LowCount, HighCount, scale; + private long _lowCount, _highCount, _scale; + + internal void IncScale(int dScale) + { + Scale = Scale + dScale; + } + + internal long HighCount { get => _highCount; set => _highCount = value & RangeCoder.UINT_MASK; } + + internal long LowCount { get => _lowCount & RangeCoder.UINT_MASK; set => _lowCount = value & RangeCoder.UINT_MASK; } + + internal long Scale { get => _scale; set => _scale = value & RangeCoder.UINT_MASK; } + + // Debug + public override String ToString() + { + StringBuilder buffer = new StringBuilder(); + buffer.Append("SubRange["); + buffer.Append("\n lowCount="); + buffer.Append(_lowCount); + buffer.Append("\n highCount="); + buffer.Append(_highCount); + buffer.Append("\n scale="); + buffer.Append(_scale); + buffer.Append("]"); + return buffer.ToString(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RarMemBlock.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/H/RarMemBlock.cs new file mode 100644 index 0000000000..78e6824cc8 --- /dev/null 
// ---- File: SharpCompress/Compressors/PPMd/H/RarMemBlock.cs ----
namespace SharpCompress.Compressors.PPMd.H
{
    using SharpCompress.Converters;

    /// <summary>
    /// Pseudo-pointer to a 12-byte free-list block header stored inside the
    /// suballocator heap. Little-endian layout at <c>Address</c>:
    /// stamp (2 bytes), nu (2 bytes), next (4 bytes), prev (4 bytes).
    /// Each accessor reads/writes the heap when <c>Memory</c> is non-null and
    /// mirrors the value into a plain field otherwise.
    /// </summary>
    internal class RarMemBlock : Pointer
    {
        public const int SIZE = 12;

        private int _stamp, _nu;

        private int _next, _prev; // heap addresses of neighbouring RarMemBlocks

        public RarMemBlock(byte[] memory)
            : base(memory)
        {
        }

        // 16-bit marker used by GlueFreeBlocks (0xFFFF = block is on the glue list).
        internal int Stamp
        {
            get
            {
                if (Memory != null)
                {
                    _stamp = DataConverter.LittleEndian.GetInt16(Memory, Address) & 0xffff;
                }
                return _stamp;
            }

            set
            {
                _stamp = value;
                if (Memory != null)
                {
                    DataConverter.LittleEndian.PutBytes(Memory, Address, (short)value);
                }
            }
        }

        /// <summary>
        /// Link this block into the doubly-linked list just after <paramref name="p"/>.
        /// NOTE(review): the read/write order below is kept verbatim from the
        /// port of the original (prev.getNext()/prev.setNext(this)/next.setPrev(this));
        /// it reuses a single temp pointer for both neighbours — confirm against
        /// upstream before restructuring.
        /// </summary>
        internal void InsertAt(RarMemBlock p)
        {
            RarMemBlock temp = new RarMemBlock(Memory);
            SetPrev(p.Address);
            temp.Address = GetPrev();
            SetNext(temp.GetNext()); // prev.getNext();
            temp.SetNext(this); // prev.setNext(this);
            temp.Address = GetNext();
            temp.SetPrev(this); // next.setPrev(this);
        }

        /// <summary>
        /// Unlink this block from the doubly-linked list. The block's own
        /// next/prev fields are deliberately left untouched (the original had
        /// "next = -1; prev = -1" commented out).
        /// </summary>
        internal void Remove()
        {
            RarMemBlock temp = new RarMemBlock(Memory);
            temp.Address = GetPrev();
            temp.SetNext(GetNext()); // prev.setNext(next);
            temp.Address = GetNext();
            temp.SetPrev(GetPrev()); // next.setPrev(prev);
        }

        internal int GetNext()
        {
            if (Memory != null)
            {
                _next = DataConverter.LittleEndian.GetInt32(Memory, Address + 4);
            }
            return _next;
        }

        internal void SetNext(RarMemBlock next)
        {
            SetNext(next.Address);
        }

        internal void SetNext(int next)
        {
            _next = next;
            if (Memory != null)
            {
                DataConverter.LittleEndian.PutBytes(Memory, Address + 4, next);
            }
        }

        // "nu" = number of units covered by this block (16-bit, unsigned).
        internal int GetNu()
        {
            if (Memory != null)
            {
                _nu = DataConverter.LittleEndian.GetInt16(Memory, Address + 2) & 0xffff;
            }
            return _nu;
        }

        internal void SetNu(int nu)
        {
            _nu = nu & 0xffff;
            if (Memory != null)
            {
                DataConverter.LittleEndian.PutBytes(Memory, Address + 2, (short)nu);
            }
        }

        internal int GetPrev()
        {
            if (Memory != null)
            {
                _prev = DataConverter.LittleEndian.GetInt32(Memory, Address + 8);
            }
            return _prev;
        }

        internal void SetPrev(RarMemBlock prev)
        {
            SetPrev(prev.Address);
        }

        internal void SetPrev(int prev)
        {
            _prev = prev;
            if (Memory != null)
            {
                DataConverter.LittleEndian.PutBytes(Memory, Address + 8, prev);
            }
        }
    }
}

// ---- File: SharpCompress/Compressors/PPMd/H/RarNode.cs ----
namespace SharpCompress.Compressors.PPMd.H
{
    using System.Text;
    using SharpCompress.Converters;

    /// <summary>
    /// Pseudo-pointer to a 4-byte singly-linked free-list node in the
    /// suballocator heap: a single little-endian "next" address.
    /// </summary>
    internal class RarNode : Pointer
    {
        private int _next; // heap address of the next RarNode

        public const int SIZE = 4;

        public RarNode(byte[] memory)
            : base(memory)
        {
        }

        internal int GetNext()
        {
            if (Memory != null)
            {
                _next = DataConverter.LittleEndian.GetInt32(Memory, Address);
            }
            return _next;
        }

        internal void SetNext(RarNode next)
        {
            SetNext(next.Address);
        }

        internal void SetNext(int next)
        {
            _next = next;
            if (Memory != null)
            {
                DataConverter.LittleEndian.PutBytes(Memory, Address, next);
            }
        }

        // Debug. NOTE: the "State[" label is preserved verbatim from the port.
        public override string ToString()
        {
            StringBuilder buffer = new StringBuilder();
            buffer.Append("State[");
            buffer.Append("\n Address=");
            buffer.Append(Address);
            buffer.Append("\n size=");
            buffer.Append(SIZE);
            buffer.Append("\n next=");
            buffer.Append(GetNext());
            buffer.Append("\n]");
            return buffer.ToString();
        }
    }
}
// ---- File: SharpCompress/Compressors/PPMd/H/SEE2Context.cs ----
namespace SharpCompress.Compressors.PPMd.H
{
    using System;
    using System.Text;

    /// <summary>
    /// SEE ("secondary escape estimation") counter for the PPMd model.
    /// Emulates the original packed C layout — summ is kept to 16 bits,
    /// shift and count to 8 bits — by masking after every mutation.
    /// </summary>
    internal class See2Context
    {
        // NOTE: this getter has a side effect — it subtracts the returned
        // mean from _summ (exponential decay of the accumulated sum).
        public virtual int Mean
        {
            get
            {
                int retVal = Utility.URShift(_summ, _shift);
                _summ -= retVal;
                return retVal + ((retVal == 0) ? 1 : 0);
            }
        }

        public virtual int Count { get => _count; set => _count = value & 0xff; }

        public virtual int Shift { get => _shift; set => _shift = value & 0xff; }

        public virtual int Summ { get => _summ; set => _summ = value & 0xffff; }

        public const int SIZE = 4;

        // ushort Summ in the original.
        private int _summ;

        // byte Shift in the original.
        private int _shift;

        // byte Count in the original.
        private int _count;

        public void Initialize(int initVal)
        {
            _shift = (ModelPpm.PERIOD_BITS - 4) & 0xff;
            _summ = (initVal << _shift) & 0xffff;
            _count = 4;
        }

        // When the count runs out, double the sum and widen the shift
        // (note the post-increment of _shift inside the expression).
        public virtual void Update()
        {
            if (_shift < ModelPpm.PERIOD_BITS && --_count == 0)
            {
                _summ += _summ;
                _count = (3 << _shift++);
            }
            _summ &= 0xffff;
            _count &= 0xff;
            _shift &= 0xff;
        }

        public virtual void IncSumm(int dSumm)
        {
            Summ = Summ + dSumm;
        }

        // Debug
        public override String ToString()
        {
            StringBuilder buffer = new StringBuilder();
            buffer.Append("SEE2Context[");
            buffer.Append("\n size=");
            buffer.Append(SIZE);
            buffer.Append("\n summ=");
            buffer.Append(_summ);
            buffer.Append("\n shift=");
            buffer.Append(_shift);
            buffer.Append("\n count=");
            buffer.Append(_count);
            buffer.Append("\n]");
            return buffer.ToString();
        }
    }
}

// ---- File: SharpCompress/Compressors/PPMd/H/State.cs ----
namespace SharpCompress.Compressors.PPMd.H
{
    using System;
    using System.Text;
    using SharpCompress.Converters;

    /// <summary>
    /// Pseudo-pointer to a 6-byte PPMd state record in the shared heap:
    /// symbol (1 byte), freq (1 byte), successor address (4 bytes, LE).
    /// </summary>
    internal class State : Pointer
    {
        internal const int SIZE = 6;

        internal State(byte[] memory)
            : base(memory)
        {
        }

        internal int Symbol { get => Memory[Address] & 0xff; set => Memory[Address] = (byte)value; }

        internal int Freq { get => Memory[Address + 1] & 0xff; set => Memory[Address + 1] = (byte)value; }

        internal State Initialize(byte[] mem)
        {
            // FIX: the generic argument was stripped in transcription;
            // the explicit <State> is required for the base call to compile.
            return base.Initialize<State>(mem);
        }

        internal void IncrementFreq(int dFreq)
        {
            Memory[Address + 1] = (byte)(Memory[Address + 1] + dFreq);
        }

        internal int GetSuccessor()
        {
            return DataConverter.LittleEndian.GetInt32(Memory, Address + 2);
        }

        internal void SetSuccessor(PpmContext successor)
        {
            SetSuccessor(successor.Address);
        }

        internal void SetSuccessor(int successor)
        {
            DataConverter.LittleEndian.PutBytes(Memory, Address + 2, successor);
        }

        // Copy symbol/freq/successor from a detached StateRef into the heap.
        internal void SetValues(StateRef state)
        {
            Symbol = state.Symbol;
            Freq = state.Freq;
            SetSuccessor(state.GetSuccessor());
        }

        // Raw 6-byte copy from another heap-backed state.
        internal void SetValues(State ptr)
        {
            Array.Copy(ptr.Memory, ptr.Address, Memory, Address, SIZE);
        }

        internal State DecrementAddress()
        {
            Address = Address - SIZE;
            return this;
        }

        internal State IncrementAddress()
        {
            Address = Address + SIZE;
            return this;
        }

        // Swap the 6 bytes of two state records in place.
        internal static void PpmdSwap(State ptr1, State ptr2)
        {
            byte[] mem1 = ptr1.Memory, mem2 = ptr2.Memory;
            for (int i = 0, pos1 = ptr1.Address, pos2 = ptr2.Address; i < SIZE; i++, pos1++, pos2++)
            {
                byte temp = mem1[pos1];
                mem1[pos1] = mem2[pos2];
                mem2[pos2] = temp;
            }
        }

        // Debug
        public override String ToString()
        {
            StringBuilder buffer = new StringBuilder();
            buffer.Append("State[");
            buffer.Append("\n Address=");
            buffer.Append(Address);
            buffer.Append("\n size=");
            buffer.Append(SIZE);
            buffer.Append("\n symbol=");
            buffer.Append(Symbol);
            buffer.Append("\n freq=");
            buffer.Append(Freq);
            buffer.Append("\n successor=");
            buffer.Append(GetSuccessor());
            buffer.Append("\n]");
            return buffer.ToString();
        }
    }
}
// ---- File: SharpCompress/Compressors/PPMd/H/StateRef.cs ----
namespace SharpCompress.Compressors.PPMd.H
{
    using System;
    using System.Text;

    /// <summary>
    /// A detached copy of a <see cref="State"/> record: symbol, frequency and
    /// successor held as plain fields instead of bytes in the shared heap.
    /// Symbol and frequency stay masked to one byte on every write.
    /// </summary>
    internal class StateRef
    {
        private int _symbol;
        private int _freq;
        private int _successor; // address of a PpmContext

        internal int Symbol
        {
            get { return _symbol; }
            set { _symbol = value & 0xff; }
        }

        internal int Freq
        {
            get { return _freq; }
            set { _freq = value & 0xff; }
        }

        // Write-only: snapshots symbol/freq/successor out of a heap-backed State.
        internal State Values
        {
            set
            {
                Freq = value.Freq;
                SetSuccessor(value.GetSuccessor());
                Symbol = value.Symbol;
            }
        }

        // Both adjustments route through the masking Freq setter.
        public virtual void IncrementFreq(int dFreq)
        {
            Freq = _freq + dFreq;
        }

        public virtual void DecrementFreq(int dFreq)
        {
            Freq = _freq - dFreq;
        }

        public virtual int GetSuccessor() => _successor;

        public virtual void SetSuccessor(PpmContext successor) => SetSuccessor(successor.Address);

        public virtual void SetSuccessor(int successor) => _successor = successor;

        // Debug dump (label "State[" preserved from the original port).
        public override String ToString()
        {
            var sb = new StringBuilder()
                .Append("State[")
                .Append("\n symbol=")
                .Append(Symbol)
                .Append("\n freq=")
                .Append(Freq)
                .Append("\n successor=")
                .Append(GetSuccessor())
                .Append("\n]");
            return sb.ToString();
        }
    }
}

// ---- File: SharpCompress/Compressors/PPMd/H/SubAllocator.cs ----
using System;
using System.Text;

namespace SharpCompress.Compressors.PPMd.H
{
    /// <summary>
    /// Memory suballocator for the PPMd (variant H) model: carves a single
    /// byte[] heap into fixed-size "units" and manages them through per-size
    /// free lists (_freeList) plus index/unit lookup tables (_indx2Units,
    /// _units2Indx). All "pointers" are integer offsets into _heap.
    /// </summary>
    internal class SubAllocator
    {
        public virtual int FakeUnitsStart { get => _fakeUnitsStart; set => _fakeUnitsStart = value; }

        public virtual int HeapEnd => _heapEnd;

        public virtual int PText { get => _pText; set => _pText = value; }

        public virtual int UnitsStart { get => _unitsStart; set => _unitsStart = value; }

        public virtual byte[] Heap => _heap;

        // Free-list size classes: N1 one-unit steps, N2 two-unit steps, etc.
        public const int N1 = 4;
        public const int N2 = 4;
        public const int N3 = 4;
        public static readonly int N4 = (128 + 3 - 1 * N1 - 2 * N2 - 3 * N3) / 4;

        public static readonly int N_INDEXES = N1 + N2 + N3 + N4;

        // Computed in the static constructor (depends on other classes' SIZE).
        public static readonly int UNIT_SIZE;

        public const int FIXED_UNIT_SIZE = 12;

        private int _subAllocatorSize;

        // byte Indx2Units[N_INDEXES], Units2Indx[128], GlueCount in the original.
        private readonly int[] _indx2Units = new int[N_INDEXES];
        private readonly int[] _units2Indx = new int[128];
        private int _glueCount;

        // byte *HeapStart, *LoUnit, *HiUnit in the original (heap offsets here).
        private int _heapStart, _loUnit, _hiUnit;

        private readonly RarNode[] _freeList = new RarNode[N_INDEXES];

        // byte *pText, *UnitsStart, *HeapEnd, *FakeUnitsStart in the original.
        private int _pText, _unitsStart, _heapEnd, _fakeUnitsStart;

        private byte[] _heap;

        private int _freeListPos;

        private int _tempMemBlockPos;

        // Scratch pointer objects, reused to avoid per-call allocations.
        private RarNode _tempRarNode;
        private RarMemBlock _tempRarMemBlock1;
        private RarMemBlock _tempRarMemBlock2;
        private RarMemBlock _tempRarMemBlock3;

        public SubAllocator()
        {
            Clean();
        }

        public virtual void Clean()
        {
            _subAllocatorSize = 0;
        }

        // Push heap offset p onto the free list for size class indx.
        private void InsertNode(int p, int indx)
        {
            RarNode temp = _tempRarNode;
            temp.Address = p;
            temp.SetNext(_freeList[indx].GetNext());
            _freeList[indx].SetNext(temp);
        }

        public virtual void IncPText()
        {
            _pText++;
        }

        // Pop a heap offset from the free list for size class indx.
        private int RemoveNode(int indx)
        {
            int retVal = _freeList[indx].GetNext();
            RarNode temp = _tempRarNode;
            temp.Address = retVal;
            _freeList[indx].SetNext(temp.GetNext());
            return retVal;
        }

        // Units to bytes.
        private int U2B(int nu)
        {
            return UNIT_SIZE * nu;
        }

        /* memblockptr: offset `items` units past basePtr */

        private int MbPtr(int basePtr, int items)
        {
            return (basePtr + U2B(items));
        }

        // Split the block at pv (size class oldIndx) so the caller keeps a
        // newIndx-sized piece; the remainder is returned to the free lists.
        private void SplitBlock(int pv, int oldIndx, int newIndx)
        {
            int i, uDiff = _indx2Units[oldIndx] - _indx2Units[newIndx];
            int p = pv + U2B(_indx2Units[newIndx]);
            if (_indx2Units[i = _units2Indx[uDiff - 1]] != uDiff)
            {
                InsertNode(p, --i);
                p += U2B(i = _indx2Units[i]);
                uDiff -= i;
            }
            InsertNode(p, _units2Indx[uDiff - 1]);
        }

        // Release the heap and all scratch objects; safe to call repeatedly.
        public virtual void StopSubAllocator()
        {
            if (_subAllocatorSize != 0)
            {
                _subAllocatorSize = 0;

                _heap = null;
                _heapStart = 1;

                // Free temp fields
                _tempRarNode = null;
                _tempRarMemBlock1 = null;
                _tempRarMemBlock2 = null;
                _tempRarMemBlock3 = null;
            }
        }

        public virtual int GetAllocatedMemory()
        {
            return _subAllocatorSize;
        }

        /// <summary>
        /// (Re)allocate the heap for saSize bytes of model memory. Returns
        /// true; if the requested size matches the current one the existing
        /// heap is kept.
        /// </summary>
        public virtual bool StartSubAllocator(int saSize)
        {
            int t = saSize;
            if (_subAllocatorSize == t)
            {
                return true;
            }
            StopSubAllocator();
            int allocSize = t / FIXED_UNIT_SIZE * UNIT_SIZE + UNIT_SIZE;

            // Extra space for the free-list nodes; the leading 1 reserves
            // offset 0 as a null pointer.
            int realAllocSize = 1 + allocSize + 4 * N_INDEXES;

            // Extra space for one scratch memblock used by GlueFreeBlocks.
            _tempMemBlockPos = realAllocSize;
            realAllocSize += RarMemBlock.SIZE;

            _heap = new byte[realAllocSize];
            _heapStart = 1;
            _heapEnd = _heapStart + allocSize - UNIT_SIZE;
            _subAllocatorSize = t;

            // Free list lives directly after the unit area ("Bug fixed" in port).
            _freeListPos = _heapStart + allocSize;

            // Init freeList
            for (int i = 0, pos = _freeListPos; i < _freeList.Length; i++, pos += RarNode.SIZE)
            {
                _freeList[i] = new RarNode(_heap);
                _freeList[i].Address = pos;
            }

            // Init temp fields
            _tempRarNode = new RarNode(_heap);
            _tempRarMemBlock1 = new RarMemBlock(_heap);
            _tempRarMemBlock2 = new RarMemBlock(_heap);
            _tempRarMemBlock3 = new RarMemBlock(_heap);

            return true;
        }

        /// <summary>
        /// Defragmentation: pull every free block onto one stamped list,
        /// coalesce physically adjacent blocks, then redistribute the merged
        /// blocks back to the per-size free lists.
        /// </summary>
        private void GlueFreeBlocks()
        {
            RarMemBlock s0 = _tempRarMemBlock1;
            s0.Address = _tempMemBlockPos;
            RarMemBlock p = _tempRarMemBlock2;
            RarMemBlock p1 = _tempRarMemBlock3;
            int i, k, sz;
            if (_loUnit != _hiUnit)
            {
                _heap[_loUnit] = 0;
            }

            // Phase 1: move all free blocks onto the s0 list, stamped 0xFFFF.
            for (i = 0, s0.SetPrev(s0), s0.SetNext(s0); i < N_INDEXES; i++)
            {
                while (_freeList[i].GetNext() != 0)
                {
                    p.Address = RemoveNode(i);
                    p.InsertAt(s0);
                    p.Stamp = 0xFFFF;
                    p.SetNu(_indx2Units[i]);
                }
            }

            // Phase 2: merge each block with any stamped block physically
            // following it ("Bug fixed" in port: recompute p1 after each merge).
            for (p.Address = s0.GetNext(); p.Address != s0.Address; p.Address = p.GetNext())
            {
                p1.Address = MbPtr(p.Address, p.GetNu());
                while (p1.Stamp == 0xFFFF && p.GetNu() + p1.GetNu() < 0x10000)
                {
                    p1.Remove();
                    p.SetNu(p.GetNu() + p1.GetNu());
                    p1.Address = MbPtr(p.Address, p.GetNu());
                }
            }

            // Phase 3: hand the merged blocks back to the free lists,
            // chopping anything larger than 128 units into 128-unit pieces.
            p.Address = s0.GetNext();
            while (p.Address != s0.Address)
            {
                for (p.Remove(), sz = p.GetNu(); sz > 128; sz -= 128, p.Address = MbPtr(p.Address, 128))
                {
                    InsertNode(p.Address, N_INDEXES - 1);
                }
                if (_indx2Units[i = _units2Indx[sz - 1]] != sz)
                {
                    k = sz - _indx2Units[--i];
                    InsertNode(MbPtr(p.Address, sz - k), k - 1);
                }
                InsertNode(p.Address, i);
                p.Address = s0.GetNext();
            }
        }

        // Slow path for AllocUnits: glue free blocks, scan larger size
        // classes, or steal from the text/units boundary as a last resort.
        private int AllocUnitsRare(int indx)
        {
            if (_glueCount == 0)
            {
                _glueCount = 255;
                GlueFreeBlocks();
                if (_freeList[indx].GetNext() != 0)
                {
                    return RemoveNode(indx);
                }
            }
            int i = indx;
            do
            {
                if (++i == N_INDEXES)
                {
                    _glueCount--;
                    i = U2B(_indx2Units[indx]);
                    int j = FIXED_UNIT_SIZE * _indx2Units[indx];
                    if (_fakeUnitsStart - _pText > j)
                    {
                        _fakeUnitsStart -= j;
                        _unitsStart -= i;
                        return _unitsStart;
                    }
                    return (0); // 0 is the null pointer: out of memory
                }
            }
            while (_freeList[i].GetNext() == 0);
            int retVal = RemoveNode(i);
            SplitBlock(retVal, i, indx);
            return retVal;
        }

        /// <summary>Allocate nu units; returns a heap offset, or 0 on failure.</summary>
        public virtual int AllocUnits(int nu)
        {
            int indx = _units2Indx[nu - 1];
            if (_freeList[indx].GetNext() != 0)
            {
                return RemoveNode(indx);
            }
            int retVal = _loUnit;
            _loUnit += U2B(_indx2Units[indx]);
            if (_loUnit <= _hiUnit)
            {
                return retVal;
            }
            _loUnit -= U2B(_indx2Units[indx]);
            return AllocUnitsRare(indx);
        }

        /// <summary>Allocate one unit for a PPM context; 0 on failure.</summary>
        public virtual int AllocContext()
        {
            if (_hiUnit != _loUnit)
            {
                return (_hiUnit -= UNIT_SIZE);
            }
            if (_freeList[0].GetNext() != 0)
            {
                return RemoveNode(0);
            }
            return AllocUnitsRare(0);
        }

        /// <summary>
        /// Grow an allocation from oldNu to oldNu+1 units; returns the (possibly
        /// moved) offset, or 0 on failure. No-op when both sizes share a class.
        /// </summary>
        public virtual int ExpandUnits(int oldPtr, int oldNu)
        {
            int i0 = _units2Indx[oldNu - 1];
            int i1 = _units2Indx[oldNu - 1 + 1];
            if (i0 == i1)
            {
                return oldPtr;
            }
            int ptr = AllocUnits(oldNu + 1);
            if (ptr != 0)
            {
                // memcpy(ptr, OldPtr, U2B(OldNU));
                Array.Copy(_heap, oldPtr, _heap, ptr, U2B(oldNu));
                InsertNode(oldPtr, i0);
            }
            return ptr;
        }

        /// <summary>
        /// Shrink an allocation from oldNu to newNu units; returns the
        /// (possibly moved) offset. No-op when both sizes share a class.
        /// </summary>
        public virtual int ShrinkUnits(int oldPtr, int oldNu, int newNu)
        {
            int i0 = _units2Indx[oldNu - 1];
            int i1 = _units2Indx[newNu - 1];
            if (i0 == i1)
            {
                return oldPtr;
            }
            if (_freeList[i1].GetNext() != 0)
            {
                int ptr = RemoveNode(i1);

                // memcpy(ptr, OldPtr, U2B(NewNU));
                Array.Copy(_heap, oldPtr, _heap, ptr, U2B(newNu));
                InsertNode(oldPtr, i0);
                return ptr;
            }
            SplitBlock(oldPtr, i0, i1);
            return oldPtr;
        }

        public virtual void FreeUnits(int ptr, int oldNu)
        {
            InsertNode(ptr, _units2Indx[oldNu - 1]);
        }

        public virtual void DecPText(int dPText)
        {
            PText = PText - dPText;
        }

        /// <summary>
        /// Reset all allocator state for a fresh model: clear the free lists,
        /// partition the heap into text/units regions, and rebuild the
        /// index&lt;-&gt;units lookup tables.
        /// </summary>
        public virtual void InitSubAllocator()
        {
            int i, k;
            Utility.Fill(_heap, _freeListPos, _freeListPos + SizeOfFreeList(), (byte)0);

            _pText = _heapStart;

            int size2 = FIXED_UNIT_SIZE * (_subAllocatorSize / 8 / FIXED_UNIT_SIZE * 7);
            int realSize2 = size2 / FIXED_UNIT_SIZE * UNIT_SIZE;
            int size1 = _subAllocatorSize - size2;
            int realSize1 = size1 / FIXED_UNIT_SIZE * UNIT_SIZE + size1 % FIXED_UNIT_SIZE;

            // NOTE(review): _hiUnit is assigned twice; the second assignment
            // (_loUnit + realSize2) wins — kept verbatim from the port.
            _hiUnit = _heapStart + _subAllocatorSize;
            _loUnit = _unitsStart = _heapStart + realSize1;
            _fakeUnitsStart = _heapStart + size1;
            _hiUnit = _loUnit + realSize2;

            // indx2Units: 1..4 by 1, then by 2, by 3, by 4 (masked to a byte).
            for (i = 0, k = 1; i < N1; i++, k += 1)
            {
                _indx2Units[i] = k & 0xff;
            }
            for (k++; i < N1 + N2; i++, k += 2)
            {
                _indx2Units[i] = k & 0xff;
            }
            for (k++; i < N1 + N2 + N3; i++, k += 3)
            {
                _indx2Units[i] = k & 0xff;
            }
            for (k++; i < (N1 + N2 + N3 + N4); i++, k += 4)
            {
                _indx2Units[i] = k & 0xff;
            }

            // units2Indx: inverse lookup, smallest class that fits k+1 units.
            for (_glueCount = 0, k = 0, i = 0; k < 128; k++)
            {
                i += ((_indx2Units[i] < (k + 1)) ? 1 : 0);
                _units2Indx[k] = i & 0xff;
            }
        }

        private int SizeOfFreeList()
        {
            return _freeList.Length * RarNode.SIZE;
        }

        // (A commented-out Java heap-dump debug helper existed here in the
        // original port; removed as dead code in comments.)

        // Debug
        public override String ToString()
        {
            StringBuilder buffer = new StringBuilder();
            buffer.Append("SubAllocator[");
            buffer.Append("\n subAllocatorSize=");
            buffer.Append(_subAllocatorSize);
            buffer.Append("\n glueCount=");
            buffer.Append(_glueCount);
            buffer.Append("\n heapStart=");
            buffer.Append(_heapStart);
            buffer.Append("\n loUnit=");
            buffer.Append(_loUnit);
            buffer.Append("\n hiUnit=");
            buffer.Append(_hiUnit);
            buffer.Append("\n pText=");
            buffer.Append(_pText);
            buffer.Append("\n unitsStart=");
            buffer.Append(_unitsStart);
            buffer.Append("\n]");
            return buffer.ToString();
        }

        static SubAllocator()
        {
            UNIT_SIZE = Math.Max(PpmContext.SIZE, RarMemBlock.SIZE);
        }
    }
}

// ---- File: SharpCompress/Compressors/PPMd/I1/Allocator.cs (continues past this chunk) ----
namespace SharpCompress.Compressors.PPMd.I1
{
    /// Allocate a single, large array and then provide sections of this array to callers.
Callers are provided with + /// instances of (which simply contain a single address value, representing a location + /// in the large array). Callers can then cast to one of the following structures (all + /// of which also simply contain a single address value): + internal class Allocator + { + private const uint UNIT_SIZE = 12; + private const uint LOCAL_OFFSET = 4; // reserve the first four bytes for Pointer.Zero + private const uint NODE_OFFSET = LOCAL_OFFSET + MemoryNode.SIZE; // reserve space for a single memory node + + private const uint HEAP_OFFSET = NODE_OFFSET + INDEX_COUNT * MemoryNode.SIZE; + + // reserve space for the array of memory nodes + + private const uint N1 = 4; + private const uint N2 = 4; + private const uint N3 = 4; + private const uint N4 = (128 + 3 - 1 * N1 - 2 * N2 - 3 * N3) / 4; + private const uint INDEX_COUNT = N1 + N2 + N3 + N4; + + private static readonly byte[] INDEX_TO_UNITS; + private static readonly byte[] UNITS_TO_INDEX; + + public uint _allocatorSize; + public uint _glueCount; + public Pointer _baseUnit; + public Pointer _lowUnit; + public Pointer _highUnit; + public Pointer _text; + public Pointer _heap; + public MemoryNode[] _memoryNodes; + + public byte[] _memory; + + /// + /// Initializes static read-only arrays used by the . + /// + static Allocator() + { + // Construct the static index to units lookup array. It will contain the following values. 
+ // + // 1 2 3 4 6 8 10 12 15 18 21 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100 104 108 + // 112 116 120 124 128 + + uint index; + uint unitCount; + + INDEX_TO_UNITS = new byte[INDEX_COUNT]; + + for (index = 0, unitCount = 1; index < N1; index++, unitCount += 1) + { + INDEX_TO_UNITS[index] = (byte)unitCount; + } + + for (unitCount++; index < N1 + N2; index++, unitCount += 2) + { + INDEX_TO_UNITS[index] = (byte)unitCount; + } + + for (unitCount++; index < N1 + N2 + N3; index++, unitCount += 3) + { + INDEX_TO_UNITS[index] = (byte)unitCount; + } + + for (unitCount++; index < N1 + N2 + N3 + N4; index++, unitCount += 4) + { + INDEX_TO_UNITS[index] = (byte)unitCount; + } + + // Construct the static units to index lookup array. It will contain the following values. + // + // 00 01 02 03 04 04 05 05 06 06 07 07 08 08 08 09 09 09 10 10 10 11 11 11 12 12 12 12 13 13 13 13 + // 14 14 14 14 15 15 15 15 16 16 16 16 17 17 17 17 18 18 18 18 19 19 19 19 20 20 20 20 21 21 21 21 + // 22 22 22 22 23 23 23 23 24 24 24 24 25 25 25 25 26 26 26 26 27 27 27 27 28 28 28 28 29 29 29 29 + // 30 30 30 30 31 31 31 31 32 32 32 32 33 33 33 33 34 34 34 34 35 35 35 35 36 36 36 36 37 37 37 37 + + UNITS_TO_INDEX = new byte[128]; + + for (unitCount = index = 0; unitCount < 128; unitCount++) + { + index += (uint)((INDEX_TO_UNITS[index] < unitCount + 1) ? 1 : 0); + UNITS_TO_INDEX[unitCount] = (byte)index; + } + } + + #region Public Methods + + public Allocator() + { + _memoryNodes = new MemoryNode[INDEX_COUNT]; + } + + /// + /// Initialize or reset the memory allocator (so that the single, large array can be re-used without destroying + /// and re-creating it). 
+ /// + public void Initialize() + { + for (int index = 0; index < INDEX_COUNT; index++) + { + _memoryNodes[index] = new MemoryNode((uint)(NODE_OFFSET + index * MemoryNode.SIZE), _memory); + _memoryNodes[index].Stamp = 0; + _memoryNodes[index].Next = MemoryNode.ZERO; + _memoryNodes[index].UnitCount = 0; + } + + _text = _heap; + + uint difference = UNIT_SIZE * (_allocatorSize / 8 / UNIT_SIZE * 7); + + _highUnit = _heap + _allocatorSize; + _lowUnit = _highUnit - difference; + _baseUnit = _highUnit - difference; + + _glueCount = 0; + } + + /// + /// Start the allocator (create a single, large array of bytes). + /// + /// + /// Note that .NET will create that array on the large object heap (because it is so large). + /// + /// + public void Start(int allocatorSize) + { + uint size = (uint)allocatorSize; + if (_allocatorSize != size) + { + Stop(); + _memory = new byte[HEAP_OFFSET + size]; // the single, large array of bytes + _heap = new Pointer(HEAP_OFFSET, _memory); // reserve bytes in the range 0 .. HeapOffset - 1 + _allocatorSize = size; + } + } + + /// + /// Stop the allocator (free the single, large array of bytes). This can safely be called multiple times (without + /// intervening calls to ). + /// + /// + /// Because the array is on the large object heap it may not be freed immediately. + /// + public void Stop() + { + if (_allocatorSize != 0) + { + _allocatorSize = 0; + _memory = null; + _heap = Pointer.ZERO; + } + } + + /// + /// Determine how much memory (from the single, large array) is currenly in use. + /// + /// + public uint GetMemoryUsed() + { + uint memoryUsed = _allocatorSize - (_highUnit - _lowUnit) - (_baseUnit - _text); + for (uint index = 0; index < INDEX_COUNT; index++) + { + memoryUsed -= UNIT_SIZE * INDEX_TO_UNITS[index] * _memoryNodes[index].Stamp; + } + return memoryUsed; + } + + /// + /// Allocate a given number of units from the single, large array. Each unit is bytes + /// in size. 
+ /// + /// + /// + public Pointer AllocateUnits(uint unitCount) + { + uint index = UNITS_TO_INDEX[unitCount - 1]; + if (_memoryNodes[index].Available) + { + return _memoryNodes[index].Remove(); + } + + Pointer allocatedBlock = _lowUnit; + _lowUnit += INDEX_TO_UNITS[index] * UNIT_SIZE; + if (_lowUnit <= _highUnit) + { + return allocatedBlock; + } + + _lowUnit -= INDEX_TO_UNITS[index] * UNIT_SIZE; + return AllocateUnitsRare(index); + } + + /// + /// Allocate enough space for a PpmContext instance in the single, large array. + /// + /// + public Pointer AllocateContext() + { + if (_highUnit != _lowUnit) + { + return (_highUnit -= UNIT_SIZE); + } + if (_memoryNodes[0].Available) + { + return _memoryNodes[0].Remove(); + } + return AllocateUnitsRare(0); + } + + /// + /// Increase the size of an existing allocation (represented by a ). + /// + /// + /// + /// + public Pointer ExpandUnits(Pointer oldPointer, uint oldUnitCount) + { + uint oldIndex = UNITS_TO_INDEX[oldUnitCount - 1]; + uint newIndex = UNITS_TO_INDEX[oldUnitCount]; + + if (oldIndex == newIndex) + { + return oldPointer; + } + + Pointer pointer = AllocateUnits(oldUnitCount + 1); + + if (pointer != Pointer.ZERO) + { + CopyUnits(pointer, oldPointer, oldUnitCount); + _memoryNodes[oldIndex].Insert(oldPointer, oldUnitCount); + } + + return pointer; + } + + /// + /// Decrease the size of an existing allocation (represented by a ). 
+ /// + /// + /// + /// + /// + public Pointer ShrinkUnits(Pointer oldPointer, uint oldUnitCount, uint newUnitCount) + { + uint oldIndex = UNITS_TO_INDEX[oldUnitCount - 1]; + uint newIndex = UNITS_TO_INDEX[newUnitCount - 1]; + + if (oldIndex == newIndex) + { + return oldPointer; + } + + if (_memoryNodes[newIndex].Available) + { + Pointer pointer = _memoryNodes[newIndex].Remove(); + CopyUnits(pointer, oldPointer, newUnitCount); + _memoryNodes[oldIndex].Insert(oldPointer, INDEX_TO_UNITS[oldIndex]); + return pointer; + } + SplitBlock(oldPointer, oldIndex, newIndex); + return oldPointer; + } + + /// + /// Free previously allocated space (the location and amount of space to free must be specified by using + /// a to indicate the location and a number of units to indicate the amount). + /// + /// + /// + public void FreeUnits(Pointer pointer, uint unitCount) + { + uint index = UNITS_TO_INDEX[unitCount - 1]; + _memoryNodes[index].Insert(pointer, INDEX_TO_UNITS[index]); + } + + public void SpecialFreeUnits(Pointer pointer) + { + if (pointer != _baseUnit) + { + _memoryNodes[0].Insert(pointer, 1); + } + else + { + MemoryNode memoryNode = pointer; + memoryNode.Stamp = uint.MaxValue; + _baseUnit += UNIT_SIZE; + } + } + + public Pointer MoveUnitsUp(Pointer oldPointer, uint unitCount) + { + uint index = UNITS_TO_INDEX[unitCount - 1]; + + if (oldPointer > _baseUnit + 16 * 1024 || oldPointer > _memoryNodes[index].Next) + { + return oldPointer; + } + + Pointer pointer = _memoryNodes[index].Remove(); + CopyUnits(pointer, oldPointer, unitCount); + unitCount = INDEX_TO_UNITS[index]; + + if (oldPointer != _baseUnit) + { + _memoryNodes[index].Insert(oldPointer, unitCount); + } + else + { + _baseUnit += unitCount * UNIT_SIZE; + } + + return pointer; + } + + /// + /// Expand the space allocated (in the single, large array) for the bytes of the data (ie. the "text") that is + /// being encoded or decoded. 
+ /// + public void ExpandText() + { + MemoryNode memoryNode; + uint[] counts = new uint[INDEX_COUNT]; + + while ((memoryNode = _baseUnit).Stamp == uint.MaxValue) + { + _baseUnit = memoryNode + memoryNode.UnitCount; + counts[UNITS_TO_INDEX[memoryNode.UnitCount - 1]]++; + memoryNode.Stamp = 0; + } + + for (uint index = 0; index < INDEX_COUNT; index++) + { + for (memoryNode = _memoryNodes[index]; counts[index] != 0; memoryNode = memoryNode.Next) + { + while (memoryNode.Next.Stamp == 0) + { + memoryNode.Unlink(); + _memoryNodes[index].Stamp--; + if (--counts[index] == 0) + { + break; + } + } + } + } + } + + #endregion + + #region Private Methods + + private Pointer AllocateUnitsRare(uint index) + { + if (_glueCount == 0) + { + GlueFreeBlocks(); + if (_memoryNodes[index].Available) + { + return _memoryNodes[index].Remove(); + } + } + + uint oldIndex = index; + do + { + if (++oldIndex == INDEX_COUNT) + { + _glueCount--; + oldIndex = INDEX_TO_UNITS[index] * UNIT_SIZE; + return (_baseUnit - _text > oldIndex) ? 
(_baseUnit -= oldIndex) : Pointer.ZERO; + } + } + while (!_memoryNodes[oldIndex].Available); + + Pointer allocatedBlock = _memoryNodes[oldIndex].Remove(); + SplitBlock(allocatedBlock, oldIndex, index); + return allocatedBlock; + } + + private void SplitBlock(Pointer pointer, uint oldIndex, uint newIndex) + { + uint unitCountDifference = (uint)(INDEX_TO_UNITS[oldIndex] - INDEX_TO_UNITS[newIndex]); + Pointer newPointer = pointer + INDEX_TO_UNITS[newIndex] * UNIT_SIZE; + + uint index = UNITS_TO_INDEX[unitCountDifference - 1]; + if (INDEX_TO_UNITS[index] != unitCountDifference) + { + uint unitCount = INDEX_TO_UNITS[--index]; + _memoryNodes[index].Insert(newPointer, unitCount); + newPointer += unitCount * UNIT_SIZE; + unitCountDifference -= unitCount; + } + + _memoryNodes[UNITS_TO_INDEX[unitCountDifference - 1]].Insert(newPointer, unitCountDifference); + } + + private void GlueFreeBlocks() + { + MemoryNode memoryNode = new MemoryNode(LOCAL_OFFSET, _memory); + memoryNode.Stamp = 0; + memoryNode.Next = MemoryNode.ZERO; + memoryNode.UnitCount = 0; + + MemoryNode memoryNode0; + MemoryNode memoryNode1; + MemoryNode memoryNode2; + + if (_lowUnit != _highUnit) + { + _lowUnit[0] = 0; + } + + // Find all unused memory nodes. + + memoryNode1 = memoryNode; + for (uint index = 0; index < INDEX_COUNT; index++) + { + while (_memoryNodes[index].Available) + { + memoryNode0 = _memoryNodes[index].Remove(); + if (memoryNode0.UnitCount != 0) + { + while ((memoryNode2 = memoryNode0 + memoryNode0.UnitCount).Stamp == uint.MaxValue) + { + memoryNode0.UnitCount = memoryNode0.UnitCount + memoryNode2.UnitCount; + memoryNode2.UnitCount = 0; + } + memoryNode1.Link(memoryNode0); + memoryNode1 = memoryNode0; + } + } + } + + // Coalesce the memory represented by the unused memory nodes. 
+ + while (memoryNode.Available) + { + memoryNode0 = memoryNode.Remove(); + uint unitCount = memoryNode0.UnitCount; + if (unitCount != 0) + { + for (; unitCount > 128; unitCount -= 128, memoryNode0 += 128) + { + _memoryNodes[INDEX_COUNT - 1].Insert(memoryNode0, 128); + } + + uint index = UNITS_TO_INDEX[unitCount - 1]; + if (INDEX_TO_UNITS[index] != unitCount) + { + uint unitCountDifference = unitCount - INDEX_TO_UNITS[--index]; + _memoryNodes[unitCountDifference - 1].Insert(memoryNode0 + (unitCount - unitCountDifference), + unitCountDifference); + } + + _memoryNodes[index].Insert(memoryNode0, INDEX_TO_UNITS[index]); + } + } + + _glueCount = 1 << 13; + } + + private void CopyUnits(Pointer target, Pointer source, uint unitCount) + { + do + { + target[0] = source[0]; + target[1] = source[1]; + target[2] = source[2]; + target[3] = source[3]; + target[4] = source[4]; + target[5] = source[5]; + target[6] = source[6]; + target[7] = source[7]; + target[8] = source[8]; + target[9] = source[9]; + target[10] = source[10]; + target[11] = source[11]; + target += UNIT_SIZE; + source += UNIT_SIZE; + } + while (--unitCount != 0); + } + + #endregion + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Coder.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Coder.cs new file mode 100644 index 0000000000..a5c0d86211 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Coder.cs @@ -0,0 +1,104 @@ +#region Using + +using System.IO; + +#endregion + +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// A simple range coder. + /// + /// + /// Note that in most cases fields are used rather than properties for performance reasons (for example, + /// is a field rather than a property). 
+ /// + internal class Coder + { + private const uint RANGE_TOP = 1 << 24; + private const uint RANGE_BOTTOM = 1 << 15; + private uint _low; + private uint _code; + private uint _range; + + public uint _lowCount; + public uint _highCount; + public uint _scale; + + public void RangeEncoderInitialize() + { + _low = 0; + _range = uint.MaxValue; + } + + public void RangeEncoderNormalize(Stream stream) + { + while ((_low ^ (_low + _range)) < RANGE_TOP || + _range < RANGE_BOTTOM && ((_range = (uint)-_low & (RANGE_BOTTOM - 1)) != 0 || true)) + { + stream.WriteByte((byte)(_low >> 24)); + _range <<= 8; + _low <<= 8; + } + } + + public void RangeEncodeSymbol() + { + _low += _lowCount * (_range /= _scale); + _range *= _highCount - _lowCount; + } + + public void RangeShiftEncodeSymbol(int rangeShift) + { + _low += _lowCount * (_range >>= rangeShift); + _range *= _highCount - _lowCount; + } + + public void RangeEncoderFlush(Stream stream) + { + for (uint index = 0; index < 4; index++) + { + stream.WriteByte((byte)(_low >> 24)); + _low <<= 8; + } + } + + public void RangeDecoderInitialize(Stream stream) + { + _low = 0; + _code = 0; + _range = uint.MaxValue; + for (uint index = 0; index < 4; index++) + { + _code = (_code << 8) | (byte)stream.ReadByte(); + } + } + + public void RangeDecoderNormalize(Stream stream) + { + while ((_low ^ (_low + _range)) < RANGE_TOP || + _range < RANGE_BOTTOM && ((_range = (uint)-_low & (RANGE_BOTTOM - 1)) != 0 || true)) + { + _code = (_code << 8) | (byte)stream.ReadByte(); + _range <<= 8; + _low <<= 8; + } + } + + public uint RangeGetCurrentCount() + { + return (_code - _low) / (_range /= _scale); + } + + public uint RangeGetCurrentShiftCount(int rangeShift) + { + return (_code - _low) / (_range >>= rangeShift); + } + + public void RangeRemoveSubrange() + { + _low += _range * _lowCount; + _range *= _highCount - _lowCount; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/MemoryNode.cs 
b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/MemoryNode.cs new file mode 100644 index 0000000000..cded381ee8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/MemoryNode.cs @@ -0,0 +1,247 @@ +#region Using + + + +#endregion + +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// A structure containing a single address. The address represents a location in the + /// array. That location in the array contains information itself describing a section + /// of the array (ie. a block of memory). + /// + /// + /// + /// This must be a structure rather than a class because several places in the associated code assume that + /// is a value type (meaning that assignment creates a completely new copy of + /// the instance rather than just copying a reference to the same instance). + /// + /// + /// MemoryNode + /// 4 Stamp + /// 4 Next + /// 4 UnitCount + /// + /// + /// Note that is a field rather than a property for performance reasons. + /// + /// + internal struct MemoryNode + { + public uint _address; + public byte[] _memory; + public static readonly MemoryNode ZERO = new MemoryNode(0, null); + public const int SIZE = 12; + + /// + /// Initializes a new instance of the structure. + /// + public MemoryNode(uint address, byte[] memory) + { + _address = address; + _memory = memory; + } + + /// + /// Gets or sets the stamp. + /// + public uint Stamp + { + get => _memory[_address] | ((uint)_memory[_address + 1]) << 8 | ((uint)_memory[_address + 2]) << 16 | + ((uint)_memory[_address + 3]) << 24; + set + { + _memory[_address] = (byte)value; + _memory[_address + 1] = (byte)(value >> 8); + _memory[_address + 2] = (byte)(value >> 16); + _memory[_address + 3] = (byte)(value >> 24); + } + } + + /// + /// Gets or sets the next memory node. 
+ /// + public MemoryNode Next + { + get => new MemoryNode( + _memory[_address + 4] | ((uint)_memory[_address + 5]) << 8 | + ((uint)_memory[_address + 6]) << 16 | ((uint)_memory[_address + 7]) << 24, _memory); + set + { + _memory[_address + 4] = (byte)value._address; + _memory[_address + 5] = (byte)(value._address >> 8); + _memory[_address + 6] = (byte)(value._address >> 16); + _memory[_address + 7] = (byte)(value._address >> 24); + } + } + + /// + /// Gets or sets the unit count. + /// + public uint UnitCount + { + get => _memory[_address + 8] | ((uint)_memory[_address + 9]) << 8 | + ((uint)_memory[_address + 10]) << 16 | ((uint)_memory[_address + 11]) << 24; + set + { + _memory[_address + 8] = (byte)value; + _memory[_address + 9] = (byte)(value >> 8); + _memory[_address + 10] = (byte)(value >> 16); + _memory[_address + 11] = (byte)(value >> 24); + } + } + + /// + /// Gets whether there is a next memory node available. + /// + public bool Available => Next._address != 0; + + /// + /// Link in the provided memory node. + /// + /// + public void Link(MemoryNode memoryNode) + { + memoryNode.Next = Next; + Next = memoryNode; + } + + /// + /// Unlink this memory node. + /// + public void Unlink() + { + Next = Next.Next; + } + + /// + /// Insert the memory node into the linked list. + /// + /// + /// + public void Insert(MemoryNode memoryNode, uint unitCount) + { + Link(memoryNode); + memoryNode.Stamp = uint.MaxValue; + memoryNode.UnitCount = unitCount; + Stamp++; + } + + /// + /// Remove this memory node from the linked list. + /// + /// + public MemoryNode Remove() + { + MemoryNode next = Next; + Unlink(); + Stamp--; + return next; + } + + /// + /// Allow a pointer to be implicitly converted to a memory node. + /// + /// + /// + public static implicit operator MemoryNode(Pointer pointer) + { + return new MemoryNode(pointer._address, pointer._memory); + } + + /// + /// Allow pointer-like addition on a memory node. 
+ /// + /// + /// + /// + public static MemoryNode operator +(MemoryNode memoryNode, int offset) + { + memoryNode._address = (uint)(memoryNode._address + offset * SIZE); + return memoryNode; + } + + /// + /// Allow pointer-like addition on a memory node. + /// + /// + /// + /// + public static MemoryNode operator +(MemoryNode memoryNode, uint offset) + { + memoryNode._address += offset * SIZE; + return memoryNode; + } + + /// + /// Allow pointer-like subtraction on a memory node. + /// + /// + /// + /// + public static MemoryNode operator -(MemoryNode memoryNode, int offset) + { + memoryNode._address = (uint)(memoryNode._address - offset * SIZE); + return memoryNode; + } + + /// + /// Allow pointer-like subtraction on a memory node. + /// + /// + /// + /// + public static MemoryNode operator -(MemoryNode memoryNode, uint offset) + { + memoryNode._address -= offset * SIZE; + return memoryNode; + } + + /// + /// Compare two memory nodes. + /// + /// + /// + /// + public static bool operator ==(MemoryNode memoryNode1, MemoryNode memoryNode2) + { + return memoryNode1._address == memoryNode2._address; + } + + /// + /// Compare two memory nodes. + /// + /// + /// + /// + public static bool operator !=(MemoryNode memoryNode1, MemoryNode memoryNode2) + { + return memoryNode1._address != memoryNode2._address; + } + + /// + /// Indicates whether this instance and a specified object are equal. + /// + /// true if obj and this instance are the same type and represent the same value; otherwise, false. + /// Another object to compare to. + public override bool Equals(object obj) + { + if (obj is MemoryNode) + { + MemoryNode memoryNode = (MemoryNode)obj; + return memoryNode._address == _address; + } + return base.Equals(obj); + } + + /// + /// Returns the hash code for this instance. + /// + /// A 32-bit signed integer that is the hash code for this instance. 
+ public override int GetHashCode() + { + return _address.GetHashCode(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Model.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Model.cs new file mode 100644 index 0000000000..11d065c16b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Model.cs @@ -0,0 +1,923 @@ +#region Using + +using System; +using System.IO; + +#endregion + +// This is a port of Dmitry Shkarin's PPMd Variant I Revision 1. +// Ported by Michael Bone (mjbone03@yahoo.com.au). + +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// The model. + /// + internal partial class Model + { + public const uint SIGNATURE = 0x84acaf8fU; + public const char VARIANT = 'I'; + public const int MAXIMUM_ORDER = 16; // maximum allowed model order + + private const byte UPPER_FREQUENCY = 5; + private const byte INTERVAL_BIT_COUNT = 7; + private const byte PERIOD_BIT_COUNT = 7; + private const byte TOTAL_BIT_COUNT = INTERVAL_BIT_COUNT + PERIOD_BIT_COUNT; + private const uint INTERVAL = 1 << INTERVAL_BIT_COUNT; + private const uint BINARY_SCALE = 1 << TOTAL_BIT_COUNT; + private const uint MAXIMUM_FREQUENCY = 124; + private const uint ORDER_BOUND = 9; + + private readonly See2Context[,] _see2Contexts; + private readonly See2Context _emptySee2Context; + private PpmContext _maximumContext; + private readonly ushort[,] _binarySummary = new ushort[25, 64]; // binary SEE-contexts + private readonly byte[] _numberStatisticsToBinarySummaryIndex = new byte[256]; + private readonly byte[] _probabilities = new byte[260]; + private readonly byte[] _characterMask = new byte[256]; + private byte _escapeCount; + private int _modelOrder; + private int _orderFall; + private int _initialEscape; + private int _initialRunLength; + private int _runLength; + private byte _previousSuccess; + private byte _numberMasked; + private ModelRestorationMethod _method; + private PpmState _foundState; 
// found next state transition + + private Allocator _allocator; + private Coder _coder; + private PpmContext _minimumContext; + private byte _numberStatistics; + private readonly PpmState[] _decodeStates = new PpmState[256]; + + private static readonly ushort[] INITIAL_BINARY_ESCAPES = + { + 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, + 0x6051 + }; + + private static readonly byte[] EXPONENTIAL_ESCAPES = {25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2}; + + #region Public Methods + + public Model() + { + // Construct the conversion table for number statistics. Initially it will contain the following values. + // + // 0 2 4 4 4 4 4 4 4 4 4 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + // 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 + + _numberStatisticsToBinarySummaryIndex[0] = 2 * 0; + _numberStatisticsToBinarySummaryIndex[1] = 2 * 1; + for (int index = 2; index < 11; index++) + { + _numberStatisticsToBinarySummaryIndex[index] = 2 * 2; + } + for (int index = 11; index < 256; index++) + { + _numberStatisticsToBinarySummaryIndex[index] = 2 * 3; + } + + // Construct the probability table. Initially it will contain the following values (depending on the value of + // the upper frequency). 
+ // + // 00 01 02 03 04 05 06 06 07 07 07 08 08 08 08 09 09 09 09 09 10 10 10 10 10 10 11 11 11 11 11 11 + // 11 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14 15 15 15 15 + // 15 15 15 15 15 15 15 16 16 16 16 16 16 16 16 16 16 16 16 17 17 17 17 17 17 17 17 17 17 17 17 17 + // 18 18 18 18 18 18 18 18 18 18 18 18 18 18 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 20 20 20 + // 20 20 20 20 20 20 20 20 20 20 20 20 20 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 22 22 + // 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 + // 23 23 23 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 25 25 25 25 25 25 25 25 25 + // 25 25 25 25 25 25 25 25 25 25 25 25 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 + // 26 26 27 27 + + uint count = 1; + uint step = 1; + uint probability = UPPER_FREQUENCY; + + for (int index = 0; index < UPPER_FREQUENCY; index++) + { + _probabilities[index] = (byte)index; + } + + for (int index = UPPER_FREQUENCY; index < 260; index++) + { + _probabilities[index] = (byte)probability; + count--; + if (count == 0) + { + step++; + count = step; + probability++; + } + } + + // Create the context array. + + _see2Contexts = new See2Context[24, 32]; + for (int index1 = 0; index1 < 24; index1++) + { + for (int index2 = 0; index2 < 32; index2++) + { + _see2Contexts[index1, index2] = new See2Context(); + } + } + + // Set the signature (identifying the algorithm). + + _emptySee2Context = new See2Context(); + _emptySee2Context._summary = (ushort)(SIGNATURE & 0x0000ffff); + _emptySee2Context._shift = (byte)((SIGNATURE >> 16) & 0x000000ff); + _emptySee2Context._count = (byte)(SIGNATURE >> 24); + } + + /// + /// Encode (ie. compress) a given source stream, writing the encoded result to the target stream. 
+ /// + public void Encode(Stream target, Stream source, PpmdProperties properties) + { + if (target == null) + { + throw new ArgumentNullException(nameof(target)); + } + + if (source == null) + { + throw new ArgumentNullException(nameof(source)); + } + + EncodeStart(properties); + EncodeBlock(target, source, true); + } + + internal Coder EncodeStart(PpmdProperties properties) + { + _allocator = properties._allocator; + _coder = new Coder(); + _coder.RangeEncoderInitialize(); + StartModel(properties.ModelOrder, properties.RestorationMethod); + return _coder; + } + + internal void EncodeBlock(Stream target, Stream source, bool final) + { + while (true) + { + _minimumContext = _maximumContext; + _numberStatistics = _minimumContext.NumberStatistics; + + int c = source.ReadByte(); + if (c < 0 && !final) + { + return; + } + + if (_numberStatistics != 0) + { + EncodeSymbol1(c, _minimumContext); + _coder.RangeEncodeSymbol(); + } + else + { + EncodeBinarySymbol(c, _minimumContext); + _coder.RangeShiftEncodeSymbol(TOTAL_BIT_COUNT); + } + + while (_foundState == PpmState.ZERO) + { + _coder.RangeEncoderNormalize(target); + do + { + _orderFall++; + _minimumContext = _minimumContext.Suffix; + if (_minimumContext == PpmContext.ZERO) + { + goto StopEncoding; + } + } + while (_minimumContext.NumberStatistics == _numberMasked); + EncodeSymbol2(c, _minimumContext); + _coder.RangeEncodeSymbol(); + } + + if (_orderFall == 0 && (Pointer)_foundState.Successor >= _allocator._baseUnit) + { + _maximumContext = _foundState.Successor; + } + else + { + UpdateModel(_minimumContext); + if (_escapeCount == 0) + { + ClearMask(); + } + } + + _coder.RangeEncoderNormalize(target); + } + + StopEncoding: + _coder.RangeEncoderFlush(target); + } + + /// + /// Dencode (ie. decompress) a given source stream, writing the decoded result to the target stream. 
+ /// + public void Decode(Stream target, Stream source, PpmdProperties properties) + { + if (target == null) + { + throw new ArgumentNullException(nameof(target)); + } + + if (source == null) + { + throw new ArgumentNullException(nameof(source)); + } + + DecodeStart(source, properties); + byte[] buffer = new byte[65536]; + int read; + while ((read = DecodeBlock(source, buffer, 0, buffer.Length)) != 0) + { + target.Write(buffer, 0, read); + } + } + + internal Coder DecodeStart(Stream source, PpmdProperties properties) + { + _allocator = properties._allocator; + _coder = new Coder(); + _coder.RangeDecoderInitialize(source); + StartModel(properties.ModelOrder, properties.RestorationMethod); + _minimumContext = _maximumContext; + _numberStatistics = _minimumContext.NumberStatistics; + return _coder; + } + + internal int DecodeBlock(Stream source, byte[] buffer, int offset, int count) + { + if (_minimumContext == PpmContext.ZERO) + { + return 0; + } + + int total = 0; + while (total < count) + { + if (_numberStatistics != 0) + { + DecodeSymbol1(_minimumContext); + } + else + { + DecodeBinarySymbol(_minimumContext); + } + + _coder.RangeRemoveSubrange(); + + while (_foundState == PpmState.ZERO) + { + _coder.RangeDecoderNormalize(source); + do + { + _orderFall++; + _minimumContext = _minimumContext.Suffix; + if (_minimumContext == PpmContext.ZERO) + { + goto StopDecoding; + } + } + while (_minimumContext.NumberStatistics == _numberMasked); + DecodeSymbol2(_minimumContext); + _coder.RangeRemoveSubrange(); + } + + buffer[offset] = _foundState.Symbol; + offset++; + total++; + + if (_orderFall == 0 && (Pointer)_foundState.Successor >= _allocator._baseUnit) + { + _maximumContext = _foundState.Successor; + } + else + { + UpdateModel(_minimumContext); + if (_escapeCount == 0) + { + ClearMask(); + } + } + + _minimumContext = _maximumContext; + _numberStatistics = _minimumContext.NumberStatistics; + _coder.RangeDecoderNormalize(source); + } + + StopDecoding: + return total; + } + 
+ #endregion + + #region Private Methods + + /// + /// Initialise the model (unless the model order is set to 1 in which case the model should be cleared so that + /// the statistics are carried over, allowing "solid" mode compression). + /// + private void StartModel(int modelOrder, ModelRestorationMethod modelRestorationMethod) + { + Array.Clear(_characterMask, 0, _characterMask.Length); + _escapeCount = 1; + + // Compress in "solid" mode if the model order value is set to 1 (this will examine the current PPM context + // structures to determine the value of orderFall). + + if (modelOrder < 2) + { + _orderFall = _modelOrder; + for (PpmContext context = _maximumContext; context.Suffix != PpmContext.ZERO; context = context.Suffix) + { + _orderFall--; + } + return; + } + + _modelOrder = modelOrder; + _orderFall = modelOrder; + _method = modelRestorationMethod; + _allocator.Initialize(); + _initialRunLength = -((modelOrder < 12) ? modelOrder : 12) - 1; + _runLength = _initialRunLength; + + // Allocate the context structure. 
+ + _maximumContext = _allocator.AllocateContext(); + _maximumContext.Suffix = PpmContext.ZERO; + _maximumContext.NumberStatistics = 255; + _maximumContext.SummaryFrequency = (ushort)(_maximumContext.NumberStatistics + 2); + _maximumContext.Statistics = _allocator.AllocateUnits(256 / 2); + + // allocates enough space for 256 PPM states (each is 6 bytes) + + _previousSuccess = 0; + for (int index = 0; index < 256; index++) + { + PpmState state = _maximumContext.Statistics[index]; + state.Symbol = (byte)index; + state.Frequency = 1; + state.Successor = PpmContext.ZERO; + } + + uint probability = 0; + for (int index1 = 0; probability < 25; probability++) + { + while (_probabilities[index1] == probability) + { + index1++; + } + for (int index2 = 0; index2 < 8; index2++) + { + _binarySummary[probability, index2] = + (ushort)(BINARY_SCALE - INITIAL_BINARY_ESCAPES[index2] / (index1 + 1)); + } + for (int index2 = 8; index2 < 64; index2 += 8) + { + for (int index3 = 0; index3 < 8; index3++) + { + _binarySummary[probability, index2 + index3] = _binarySummary[probability, index3]; + } + } + } + + probability = 0; + for (uint index1 = 0; probability < 24; probability++) + { + while (_probabilities[index1 + 3] == probability + 3) + { + index1++; + } + for (int index2 = 0; index2 < 32; index2++) + { + _see2Contexts[probability, index2].Initialize(2 * index1 + 5); + } + } + } + + private void UpdateModel(PpmContext minimumContext) + { + PpmState state = PpmState.ZERO; + PpmContext successor; + PpmContext currentContext = _maximumContext; + uint numberStatistics; + uint ns1; + uint cf; + uint sf; + uint s0; + uint foundStateFrequency = _foundState.Frequency; + byte foundStateSymbol = _foundState.Symbol; + byte symbol; + byte flag; + + PpmContext foundStateSuccessor = _foundState.Successor; + PpmContext context = minimumContext.Suffix; + + if ((foundStateFrequency < MAXIMUM_FREQUENCY / 4) && (context != PpmContext.ZERO)) + { + if (context.NumberStatistics != 0) + { + state = 
context.Statistics; + if (state.Symbol != foundStateSymbol) + { + do + { + symbol = state[1].Symbol; + state++; + } + while (symbol != foundStateSymbol); + if (state[0].Frequency >= state[-1].Frequency) + { + Swap(state[0], state[-1]); + state--; + } + } + cf = (uint)((state.Frequency < MAXIMUM_FREQUENCY - 9) ? 2 : 0); + state.Frequency += (byte)cf; + context.SummaryFrequency += (byte)cf; + } + else + { + state = context.FirstState; + state.Frequency += (byte)((state.Frequency < 32) ? 1 : 0); + } + } + + if (_orderFall == 0 && foundStateSuccessor != PpmContext.ZERO) + { + _foundState.Successor = CreateSuccessors(true, state, minimumContext); + if (_foundState.Successor == PpmContext.ZERO) + { + goto RestartModel; + } + _maximumContext = _foundState.Successor; + return; + } + + _allocator._text[0] = foundStateSymbol; + _allocator._text++; + successor = _allocator._text; + + if (_allocator._text >= _allocator._baseUnit) + { + goto RestartModel; + } + + if (foundStateSuccessor != PpmContext.ZERO) + { + if (foundStateSuccessor < _allocator._baseUnit) + { + foundStateSuccessor = CreateSuccessors(false, state, minimumContext); + } + } + else + { + foundStateSuccessor = ReduceOrder(state, minimumContext); + } + + if (foundStateSuccessor == PpmContext.ZERO) + { + goto RestartModel; + } + + if (--_orderFall == 0) + { + successor = foundStateSuccessor; + _allocator._text -= (_maximumContext != minimumContext) ? 1 : 0; + } + else if (_method > ModelRestorationMethod.Freeze) + { + successor = foundStateSuccessor; + _allocator._text = _allocator._heap; + _orderFall = 0; + } + + numberStatistics = minimumContext.NumberStatistics; + s0 = minimumContext.SummaryFrequency - numberStatistics - foundStateFrequency; + flag = (byte)((foundStateSymbol >= 0x40) ? 
0x08 : 0x00); + for (; currentContext != minimumContext; currentContext = currentContext.Suffix) + { + ns1 = currentContext.NumberStatistics; + if (ns1 != 0) + { + if ((ns1 & 1) != 0) + { + state = _allocator.ExpandUnits(currentContext.Statistics, (ns1 + 1) >> 1); + if (state == PpmState.ZERO) + { + goto RestartModel; + } + currentContext.Statistics = state; + } + currentContext.SummaryFrequency += (ushort)((3 * ns1 + 1 < numberStatistics) ? 1 : 0); + } + else + { + state = _allocator.AllocateUnits(1); + if (state == PpmState.ZERO) + { + goto RestartModel; + } + Copy(state, currentContext.FirstState); + currentContext.Statistics = state; + if (state.Frequency < MAXIMUM_FREQUENCY / 4 - 1) + { + state.Frequency += state.Frequency; + } + else + { + state.Frequency = (byte)(MAXIMUM_FREQUENCY - 4); + } + currentContext.SummaryFrequency = + (ushort)(state.Frequency + _initialEscape + ((numberStatistics > 2) ? 1 : 0)); + } + + cf = (uint)(2 * foundStateFrequency * (currentContext.SummaryFrequency + 6)); + sf = s0 + currentContext.SummaryFrequency; + + if (cf < 6 * sf) + { + cf = (uint)(1 + ((cf > sf) ? 1 : 0) + ((cf >= 4 * sf) ? 1 : 0)); + currentContext.SummaryFrequency += 4; + } + else + { + cf = (uint)(4 + ((cf > 9 * sf) ? 1 : 0) + ((cf > 12 * sf) ? 1 : 0) + ((cf > 15 * sf) ? 
1 : 0)); + currentContext.SummaryFrequency += (ushort)cf; + } + + state = currentContext.Statistics + (++currentContext.NumberStatistics); + state.Successor = successor; + state.Symbol = foundStateSymbol; + state.Frequency = (byte)cf; + currentContext.Flags |= flag; + } + + _maximumContext = foundStateSuccessor; + return; + + RestartModel: + RestoreModel(currentContext, minimumContext, foundStateSuccessor); + } + + private PpmContext CreateSuccessors(bool skip, PpmState state, PpmContext context) + { + PpmContext upBranch = _foundState.Successor; + PpmState[] states = new PpmState[MAXIMUM_ORDER]; + uint stateIndex = 0; + byte symbol = _foundState.Symbol; + + if (!skip) + { + states[stateIndex++] = _foundState; + if (context.Suffix == PpmContext.ZERO) + { + goto NoLoop; + } + } + + bool gotoLoopEntry = false; + if (state != PpmState.ZERO) + { + context = context.Suffix; + gotoLoopEntry = true; + } + + do + { + if (gotoLoopEntry) + { + gotoLoopEntry = false; + goto LoopEntry; + } + + context = context.Suffix; + if (context.NumberStatistics != 0) + { + byte temporary; + state = context.Statistics; + if (state.Symbol != symbol) + { + do + { + temporary = state[1].Symbol; + state++; + } + while (temporary != symbol); + } + temporary = (byte)((state.Frequency < MAXIMUM_FREQUENCY - 9) ? 1 : 0); + state.Frequency += temporary; + context.SummaryFrequency += temporary; + } + else + { + state = context.FirstState; + state.Frequency += + (byte)(((context.Suffix.NumberStatistics == 0) ? 1 : 0) & ((state.Frequency < 24) ? 1 : 0)); + } + + LoopEntry: + if (state.Successor != upBranch) + { + context = state.Successor; + break; + } + states[stateIndex++] = state; + } + while (context.Suffix != PpmContext.ZERO); + + NoLoop: + if (stateIndex == 0) + { + return context; + } + + byte localNumberStatistics = 0; + byte localFlags = (byte)((symbol >= 0x40) ? 
0x10 : 0x00); + symbol = upBranch.NumberStatistics; + byte localSymbol = symbol; + byte localFrequency; + PpmContext localSuccessor = ((Pointer)upBranch) + 1; + localFlags |= (byte)((symbol >= 0x40) ? 0x08 : 0x00); + + if (context.NumberStatistics != 0) + { + state = context.Statistics; + if (state.Symbol != symbol) + { + byte temporary; + do + { + temporary = state[1].Symbol; + state++; + } + while (temporary != symbol); + } + uint cf = (uint)(state.Frequency - 1); + uint s0 = (uint)(context.SummaryFrequency - context.NumberStatistics - cf); + localFrequency = (byte)(1 + ((2 * cf <= s0) ? (uint)((5 * cf > s0) ? 1 : 0) : ((cf + 2 * s0 - 3) / s0))); + } + else + { + localFrequency = context.FirstStateFrequency; + } + + do + { + PpmContext currentContext = _allocator.AllocateContext(); + if (currentContext == PpmContext.ZERO) + { + return PpmContext.ZERO; + } + currentContext.NumberStatistics = localNumberStatistics; + currentContext.Flags = localFlags; + currentContext.FirstStateSymbol = localSymbol; + currentContext.FirstStateFrequency = localFrequency; + currentContext.FirstStateSuccessor = localSuccessor; + currentContext.Suffix = context; + context = currentContext; + states[--stateIndex].Successor = context; + } + while (stateIndex != 0); + + return context; + } + + private PpmContext ReduceOrder(PpmState state, PpmContext context) + { + PpmState currentState; + PpmState[] states = new PpmState[MAXIMUM_ORDER]; + uint stateIndex = 0; + PpmContext currentContext = context; + PpmContext upBranch = _allocator._text; + byte temporary; + byte symbol = _foundState.Symbol; + + states[stateIndex++] = _foundState; + _foundState.Successor = upBranch; + _orderFall++; + + bool gotoLoopEntry = false; + if (state != PpmState.ZERO) + { + context = context.Suffix; + gotoLoopEntry = true; + } + + while (true) + { + if (gotoLoopEntry) + { + gotoLoopEntry = false; + goto LoopEntry; + } + + if (context.Suffix == PpmContext.ZERO) + { + if (_method > ModelRestorationMethod.Freeze) + 
{ + do + { + states[--stateIndex].Successor = context; + } + while (stateIndex != 0); + _allocator._text = _allocator._heap + 1; + _orderFall = 1; + } + return context; + } + + context = context.Suffix; + if (context.NumberStatistics != 0) + { + state = context.Statistics; + if (state.Symbol != symbol) + { + do + { + temporary = state[1].Symbol; + state++; + } + while (temporary != symbol); + } + temporary = (byte)((state.Frequency < MAXIMUM_FREQUENCY - 9) ? 2 : 0); + state.Frequency += temporary; + context.SummaryFrequency += temporary; + } + else + { + state = context.FirstState; + state.Frequency += (byte)((state.Frequency < 32) ? 1 : 0); + } + + LoopEntry: + if (state.Successor != PpmContext.ZERO) + { + break; + } + states[stateIndex++] = state; + state.Successor = upBranch; + _orderFall++; + } + + if (_method > ModelRestorationMethod.Freeze) + { + context = state.Successor; + do + { + states[--stateIndex].Successor = context; + } + while (stateIndex != 0); + _allocator._text = _allocator._heap + 1; + _orderFall = 1; + return context; + } + if (state.Successor <= upBranch) + { + currentState = _foundState; + _foundState = state; + state.Successor = CreateSuccessors(false, PpmState.ZERO, context); + _foundState = currentState; + } + + if (_orderFall == 1 && currentContext == _maximumContext) + { + _foundState.Successor = state.Successor; + _allocator._text--; + } + + return state.Successor; + } + + private void RestoreModel(PpmContext context, PpmContext minimumContext, PpmContext foundStateSuccessor) + { + PpmContext currentContext; + + _allocator._text = _allocator._heap; + for (currentContext = _maximumContext; currentContext != context; currentContext = currentContext.Suffix) + { + if (--currentContext.NumberStatistics == 0) + { + currentContext.Flags = + (byte) + ((currentContext.Flags & 0x10) + ((currentContext.Statistics.Symbol >= 0x40) ? 
0x08 : 0x00)); + PpmState state = currentContext.Statistics; + Copy(currentContext.FirstState, state); + _allocator.SpecialFreeUnits(state); + currentContext.FirstStateFrequency = (byte)((currentContext.FirstStateFrequency + 11) >> 3); + } + else + { + Refresh((uint)((currentContext.NumberStatistics + 3) >> 1), false, currentContext); + } + } + + for (; currentContext != minimumContext; currentContext = currentContext.Suffix) + { + if (currentContext.NumberStatistics == 0) + { + currentContext.FirstStateFrequency -= (byte)(currentContext.FirstStateFrequency >> 1); + } + else if ((currentContext.SummaryFrequency += 4) > 128 + 4 * currentContext.NumberStatistics) + { + Refresh((uint)((currentContext.NumberStatistics + 2) >> 1), true, currentContext); + } + } + + if (_method > ModelRestorationMethod.Freeze) + { + _maximumContext = foundStateSuccessor; + _allocator._glueCount += (uint)(((_allocator._memoryNodes[1].Stamp & 1) == 0) ? 1 : 0); + } + else if (_method == ModelRestorationMethod.Freeze) + { + while (_maximumContext.Suffix != PpmContext.ZERO) + { + _maximumContext = _maximumContext.Suffix; + } + + RemoveBinaryContexts(0, _maximumContext); + _method = _method + 1; + _allocator._glueCount = 0; + _orderFall = _modelOrder; + } + else if (_method == ModelRestorationMethod.Restart || + _allocator.GetMemoryUsed() < (_allocator._allocatorSize >> 1)) + { + StartModel(_modelOrder, _method); + _escapeCount = 0; + } + else + { + while (_maximumContext.Suffix != PpmContext.ZERO) + { + _maximumContext = _maximumContext.Suffix; + } + + do + { + CutOff(0, _maximumContext); + _allocator.ExpandText(); + } + while (_allocator.GetMemoryUsed() > 3 * (_allocator._allocatorSize >> 2)); + + _allocator._glueCount = 0; + _orderFall = _modelOrder; + } + } + + private static void Swap(PpmState state1, PpmState state2) + { + byte swapSymbol = state1.Symbol; + byte swapFrequency = state1.Frequency; + PpmContext swapSuccessor = state1.Successor; + + state1.Symbol = state2.Symbol; + 
state1.Frequency = state2.Frequency; + state1.Successor = state2.Successor; + + state2.Symbol = swapSymbol; + state2.Frequency = swapFrequency; + state2.Successor = swapSuccessor; + } + + private static void Copy(PpmState state1, PpmState state2) + { + state1.Symbol = state2.Symbol; + state1.Frequency = state2.Frequency; + state1.Successor = state2.Successor; + } + + private static int Mean(int sum, int shift, int round) + { + return (sum + (1 << (shift - round))) >> shift; + } + + private void ClearMask() + { + _escapeCount = 1; + Array.Clear(_characterMask, 0, _characterMask.Length); + } + + #endregion + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/ModelRestorationMethod.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/ModelRestorationMethod.cs new file mode 100644 index 0000000000..720e9d9975 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/ModelRestorationMethod.cs @@ -0,0 +1,29 @@ +#region Using + + + +#endregion + +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// The method used to adjust the model when the memory limit is reached. + /// + internal enum ModelRestorationMethod + { + /// + /// Restart the model from scratch (this is the default). + /// + Restart = 0, + + /// + /// Cut off the model (nearly twice as slow). + /// + CutOff = 1, + + /// + /// Freeze the context tree (in some cases may result in poor compression). 
+ /// + Freeze = 2 + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Pointer.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Pointer.cs new file mode 100644 index 0000000000..e95574e792 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/Pointer.cs @@ -0,0 +1,365 @@ +#region Using + +using System; + +#endregion + +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// A structure containing a single address representing a position in the array. This + /// is intended to mimic the behaviour of a pointer in C/C++. + /// + /// + /// + /// This must be a structure rather than a class because several places in the associated code assume that + /// is a value type (meaning that assignment creates a completely new copy of the + /// instance rather than just copying a reference to the same instance). + /// + /// + /// Note that is a field rather than a property for performance reasons. + /// + /// + internal struct Pointer + { + public uint _address; + public byte[] _memory; + public static readonly Pointer ZERO = new Pointer(0, null); + public const int SIZE = 1; + + /// + /// Initializes a new instance of the structure. + /// + public Pointer(uint address, byte[] memory) + { + _address = address; + _memory = memory; + } + + /// + /// Gets or sets the byte at the given . + /// + /// + /// + public byte this[int offset] + { + get + { +#if DEBUG + if (_address == 0) + { + throw new InvalidOperationException("The pointer being indexed is a null pointer."); + } +#endif + return _memory[_address + offset]; + } + set + { +#if DEBUG + if (_address == 0) + { + throw new InvalidOperationException("The pointer being indexed is a null pointer."); + } +#endif + _memory[_address + offset] = value; + } + } + + /// + /// Allow a to be implicitly converted to a . 
+ /// + /// + /// + public static implicit operator Pointer(MemoryNode memoryNode) + { + return new Pointer(memoryNode._address, memoryNode._memory); + } + + /// + /// Allow a to be implicitly converted to a . + /// + /// + /// + public static implicit operator Pointer(Model.PpmContext context) + { + return new Pointer(context._address, context._memory); + } + + /// + /// Allow a to be implicitly converted to a . + /// + /// + /// + public static implicit operator Pointer(PpmState state) + { + return new Pointer(state._address, state._memory); + } + + /// + /// Increase the address of a pointer by the given number of bytes. + /// + /// + /// + /// + public static Pointer operator +(Pointer pointer, int offset) + { +#if DEBUG + if (pointer._address == 0) + { + throw new InvalidOperationException("The pointer is a null pointer."); + } +#endif + pointer._address = (uint)(pointer._address + offset); + return pointer; + } + + /// + /// Increase the address of a pointer by the given number of bytes. + /// + /// + /// + /// + public static Pointer operator +(Pointer pointer, uint offset) + { +#if DEBUG + if (pointer._address == 0) + { + throw new InvalidOperationException("The pointer is a null pointer."); + } +#endif + pointer._address += offset; + return pointer; + } + + /// + /// Increment the address of a pointer. + /// + /// + /// + public static Pointer operator ++(Pointer pointer) + { +#if DEBUG + if (pointer._address == 0) + { + throw new InvalidOperationException("The pointer being incremented is a null pointer."); + } +#endif + pointer._address++; + return pointer; + } + + /// + /// Decrease the address of a pointer by the given number of bytes. 
+ /// + /// + /// + /// + public static Pointer operator -(Pointer pointer, int offset) + { +#if DEBUG + if (pointer._address == 0) + { + throw new InvalidOperationException("The pointer is a null pointer."); + } +#endif + pointer._address = (uint)(pointer._address - offset); + return pointer; + } + + /// + /// Decrease the address of a pointer by the given number of bytes. + /// + /// + /// + /// + public static Pointer operator -(Pointer pointer, uint offset) + { +#if DEBUG + if (pointer._address == 0) + { + throw new InvalidOperationException("The pointer is a null pointer."); + } +#endif + pointer._address -= offset; + return pointer; + } + + /// + /// Decrement the address of a pointer. + /// + /// + /// + public static Pointer operator --(Pointer pointer) + { +#if DEBUG + if (pointer._address == 0) + { + throw new InvalidOperationException("The pointer being decremented is a null pointer."); + } +#endif + pointer._address--; + return pointer; + } + + /// + /// Subtract two pointers. + /// + /// + /// + /// The number of bytes between the two pointers. + public static uint operator -(Pointer pointer1, Pointer pointer2) + { +#if DEBUG + if (pointer1._address == 0) + { + throw new InvalidOperationException( + "The pointer to the left of the subtraction operator is a null pointer."); + } + if (pointer2._address == 0) + { + throw new InvalidOperationException( + "The pointer to the right of the subtraction operator is a null pointer."); + } +#endif + return pointer1._address - pointer2._address; + } + + /// + /// Compare pointers. 
+ /// + /// + /// + /// + public static bool operator <(Pointer pointer1, Pointer pointer2) + { +#if DEBUG + if (pointer1._address == 0) + { + throw new InvalidOperationException( + "The pointer to the left of the less than operator is a null pointer."); + } + if (pointer2._address == 0) + { + throw new InvalidOperationException( + "The pointer to the right of the less than operator is a null pointer."); + } +#endif + return pointer1._address < pointer2._address; + } + + /// + /// Compare two pointers. + /// + /// + /// + /// + public static bool operator <=(Pointer pointer1, Pointer pointer2) + { +#if DEBUG + if (pointer1._address == 0) + { + throw new InvalidOperationException( + "The pointer to the left of the less than or equal to operator is a null pointer."); + } + if (pointer2._address == 0) + { + throw new InvalidOperationException( + "The pointer to the right of the less than or equal to operator is a null pointer."); + } +#endif + return pointer1._address <= pointer2._address; + } + + /// + /// Compare two pointers. + /// + /// + /// + /// + public static bool operator >(Pointer pointer1, Pointer pointer2) + { +#if DEBUG + if (pointer1._address == 0) + { + throw new InvalidOperationException( + "The pointer to the left of the greater than operator is a null pointer."); + } + if (pointer2._address == 0) + { + throw new InvalidOperationException( + "The pointer to the right of the greater than operator is a null pointer."); + } +#endif + return pointer1._address > pointer2._address; + } + + /// + /// Compare two pointers. 
+ /// + /// + /// + /// + public static bool operator >=(Pointer pointer1, Pointer pointer2) + { +#if DEBUG + if (pointer1._address == 0) + { + throw new InvalidOperationException( + "The pointer to the left of the greater than or equal to operator is a null pointer."); + } + if (pointer2._address == 0) + { + throw new InvalidOperationException( + "The pointer to the right of the greater than or equal to operator is a null pointer."); + } +#endif + return pointer1._address >= pointer2._address; + } + + /// + /// Compare two pointers. + /// + /// + /// + /// + public static bool operator ==(Pointer pointer1, Pointer pointer2) + { + return pointer1._address == pointer2._address; + } + + /// + /// Compare two pointers. + /// + /// + /// + /// + public static bool operator !=(Pointer pointer1, Pointer pointer2) + { + return pointer1._address != pointer2._address; + } + + /// + /// Indicates whether this instance and a specified object are equal. + /// + /// true if obj and this instance are the same type and represent the same value; otherwise, false. + /// Another object to compare to. + public override bool Equals(object obj) + { + if (obj is Pointer) + { + Pointer pointer = (Pointer)obj; + return pointer._address == _address; + } + return base.Equals(obj); + } + + /// + /// Returns the hash code for this instance. + /// + /// A 32-bit signed integer that is the hash code for this instance. + public override int GetHashCode() + { + return _address.GetHashCode(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmContext.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmContext.cs new file mode 100644 index 0000000000..6ec1a7b84b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmContext.cs @@ -0,0 +1,815 @@ +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// The PPM context structure. This is tightly coupled with . 
+ /// + /// + /// + /// This must be a structure rather than a class because several places in the associated code assume that + /// is a value type (meaning that assignment creates a completely new copy of + /// the instance rather than just copying a reference to the same instance). + /// + /// + internal partial class Model + { + /// + /// The structure which represents the current PPM context. This is 12 bytes in size. + /// + internal struct PpmContext + { + public uint _address; + public byte[] _memory; + public static readonly PpmContext ZERO = new PpmContext(0, null); + public const int SIZE = 12; + + /// + /// Initializes a new instance of the structure. + /// + public PpmContext(uint address, byte[] memory) + { + _address = address; + _memory = memory; + } + + /// + /// Gets or sets the number statistics. + /// + public byte NumberStatistics { get => _memory[_address]; set => _memory[_address] = value; } + + /// + /// Gets or sets the flags. + /// + public byte Flags { get => _memory[_address + 1]; set => _memory[_address + 1] = value; } + + /// + /// Gets or sets the summary frequency. + /// + public ushort SummaryFrequency + { + get => (ushort)(_memory[_address + 2] | _memory[_address + 3] << 8); + set + { + _memory[_address + 2] = (byte)value; + _memory[_address + 3] = (byte)(value >> 8); + } + } + + /// + /// Gets or sets the statistics. + /// + public PpmState Statistics + { + get => new PpmState( + _memory[_address + 4] | ((uint)_memory[_address + 5]) << 8 | + ((uint)_memory[_address + 6]) << 16 | ((uint)_memory[_address + 7]) << 24, _memory); + set + { + _memory[_address + 4] = (byte)value._address; + _memory[_address + 5] = (byte)(value._address >> 8); + _memory[_address + 6] = (byte)(value._address >> 16); + _memory[_address + 7] = (byte)(value._address >> 24); + } + } + + /// + /// Gets or sets the suffix. 
+ /// + public PpmContext Suffix + { + get => new PpmContext( + _memory[_address + 8] | ((uint)_memory[_address + 9]) << 8 | + ((uint)_memory[_address + 10]) << 16 | ((uint)_memory[_address + 11]) << 24, _memory); + set + { + _memory[_address + 8] = (byte)value._address; + _memory[_address + 9] = (byte)(value._address >> 8); + _memory[_address + 10] = (byte)(value._address >> 16); + _memory[_address + 11] = (byte)(value._address >> 24); + } + } + + /// + /// The first PPM state associated with the PPM context. + /// + /// + /// + /// The first PPM state overlaps this PPM context instance (the context.SummaryFrequency and context.Statistics members + /// of PpmContext use 6 bytes and so can therefore fit into the space used by the Symbol, Frequency and + /// Successor members of PpmState, since they also add up to 6 bytes). + /// + /// + /// PpmContext (context.SummaryFrequency and context.Statistics use 6 bytes) + /// 1 context.NumberStatistics + /// 1 context.Flags + /// 2 context.SummaryFrequency + /// 4 context.Statistics (pointer to PpmState) + /// 4 context.Suffix (pointer to PpmContext) + /// + /// + /// PpmState (total of 6 bytes) + /// 1 Symbol + /// 1 Frequency + /// 4 Successor (pointer to PpmContext) + /// + /// + /// + public PpmState FirstState => new PpmState(_address + 2, _memory); + + /// + /// Gets or sets the symbol of the first PPM state. This is provided for convenience. The same + /// information can be obtained using the Symbol property on the PPM state provided by the + /// property. + /// + public byte FirstStateSymbol { get => _memory[_address + 2]; set => _memory[_address + 2] = value; } + + /// + /// Gets or sets the frequency of the first PPM state. This is provided for convenience. The same + /// information can be obtained using the Frequency property on the PPM state provided by the + ///context.FirstState property. 
+ /// + public byte FirstStateFrequency { get => _memory[_address + 3]; set => _memory[_address + 3] = value; } + + /// + /// Gets or sets the successor of the first PPM state. This is provided for convenience. The same + /// information can be obtained using the Successor property on the PPM state provided by the + /// + public PpmContext FirstStateSuccessor + { + get => new PpmContext( + _memory[_address + 4] | ((uint)_memory[_address + 5]) << 8 | + ((uint)_memory[_address + 6]) << 16 | ((uint)_memory[_address + 7]) << 24, _memory); + set + { + _memory[_address + 4] = (byte)value._address; + _memory[_address + 5] = (byte)(value._address >> 8); + _memory[_address + 6] = (byte)(value._address >> 16); + _memory[_address + 7] = (byte)(value._address >> 24); + } + } + + /// + /// Allow a pointer to be implicitly converted to a PPM context. + /// + /// + /// + public static implicit operator PpmContext(Pointer pointer) + { + return new PpmContext(pointer._address, pointer._memory); + } + + /// + /// Allow pointer-like addition on a PPM context. + /// + /// + /// + /// + public static PpmContext operator +(PpmContext context, int offset) + { + context._address = (uint)(context._address + offset * SIZE); + return context; + } + + /// + /// Allow pointer-like subtraction on a PPM context. + /// + /// + /// + /// + public static PpmContext operator -(PpmContext context, int offset) + { + context._address = (uint)(context._address - offset * SIZE); + return context; + } + + /// + /// Compare two PPM contexts. + /// + /// + /// + /// + public static bool operator <=(PpmContext context1, PpmContext context2) + { + return context1._address <= context2._address; + } + + /// + /// Compare two PPM contexts. + /// + /// + /// + /// + public static bool operator >=(PpmContext context1, PpmContext context2) + { + return context1._address >= context2._address; + } + + /// + /// Compare two PPM contexts. 
+ /// + /// + /// + /// + public static bool operator ==(PpmContext context1, PpmContext context2) + { + return context1._address == context2._address; + } + + /// + /// Compare two PPM contexts. + /// + /// + /// + /// + public static bool operator !=(PpmContext context1, PpmContext context2) + { + return context1._address != context2._address; + } + + /// + /// Indicates whether this instance and a specified object are equal. + /// + /// true if obj and this instance are the same type and represent the same value; otherwise, false. + /// Another object to compare to. + public override bool Equals(object obj) + { + if (obj is PpmContext) + { + PpmContext context = (PpmContext)obj; + return context._address == _address; + } + return base.Equals(obj); + } + + /// + /// Returns the hash code for this instance. + /// + /// A 32-bit signed integer that is the hash code for this instance. + public override int GetHashCode() + { + return _address.GetHashCode(); + } + } + + private void EncodeBinarySymbol(int symbol, PpmContext context) + { + PpmState state = context.FirstState; + int index1 = _probabilities[state.Frequency - 1]; + int index2 = _numberStatisticsToBinarySummaryIndex[context.Suffix.NumberStatistics] + _previousSuccess + + context.Flags + ((_runLength >> 26) & 0x20); + + if (state.Symbol == symbol) + { + _foundState = state; + state.Frequency += (byte)((state.Frequency < 196) ? 
1 : 0); + _coder._lowCount = 0; + _coder._highCount = _binarySummary[index1, index2]; + _binarySummary[index1, index2] += + (ushort)(INTERVAL - Mean(_binarySummary[index1, index2], PERIOD_BIT_COUNT, 2)); + _previousSuccess = 1; + _runLength++; + } + else + { + _coder._lowCount = _binarySummary[index1, index2]; + _binarySummary[index1, index2] -= (ushort)Mean(_binarySummary[index1, index2], PERIOD_BIT_COUNT, 2); + _coder._highCount = BINARY_SCALE; + _initialEscape = EXPONENTIAL_ESCAPES[_binarySummary[index1, index2] >> 10]; + _characterMask[state.Symbol] = _escapeCount; + _previousSuccess = 0; + _numberMasked = 0; + _foundState = PpmState.ZERO; + } + } + + private void EncodeSymbol1(int symbol, PpmContext context) + { + uint lowCount; + uint index = context.Statistics.Symbol; + PpmState state = context.Statistics; + _coder._scale = context.SummaryFrequency; + if (index == symbol) + { + _coder._highCount = state.Frequency; + _previousSuccess = (byte)((2 * _coder._highCount >= _coder._scale) ? 
1 : 0); + _foundState = state; + _foundState.Frequency += 4; + context.SummaryFrequency += 4; + _runLength += _previousSuccess; + if (state.Frequency > MAXIMUM_FREQUENCY) + { + Rescale(context); + } + _coder._lowCount = 0; + return; + } + + lowCount = state.Frequency; + index = context.NumberStatistics; + _previousSuccess = 0; + while ((++state).Symbol != symbol) + { + lowCount += state.Frequency; + if (--index == 0) + { + _coder._lowCount = lowCount; + _characterMask[state.Symbol] = _escapeCount; + _numberMasked = context.NumberStatistics; + index = context.NumberStatistics; + _foundState = PpmState.ZERO; + do + { + _characterMask[(--state).Symbol] = _escapeCount; + } + while (--index != 0); + _coder._highCount = _coder._scale; + return; + } + } + _coder._highCount = (_coder._lowCount = lowCount) + state.Frequency; + Update1(state, context); + } + + private void EncodeSymbol2(int symbol, PpmContext context) + { + See2Context see2Context = MakeEscapeFrequency(context); + uint currentSymbol; + uint lowCount = 0; + uint index = (uint)(context.NumberStatistics - _numberMasked); + PpmState state = context.Statistics - 1; + + do + { + do + { + currentSymbol = state[1].Symbol; + state++; + } + while (_characterMask[currentSymbol] == _escapeCount); + _characterMask[currentSymbol] = _escapeCount; + if (currentSymbol == symbol) + { + goto SymbolFound; + } + lowCount += state.Frequency; + } + while (--index != 0); + + _coder._lowCount = lowCount; + _coder._scale += _coder._lowCount; + _coder._highCount = _coder._scale; + see2Context._summary += (ushort)_coder._scale; + _numberMasked = context.NumberStatistics; + return; + + SymbolFound: + _coder._lowCount = lowCount; + lowCount += state.Frequency; + _coder._highCount = lowCount; + for (PpmState p1 = state; --index != 0;) + { + do + { + currentSymbol = p1[1].Symbol; + p1++; + } + while (_characterMask[currentSymbol] == _escapeCount); + lowCount += p1.Frequency; + } + _coder._scale += lowCount; + see2Context.Update(); + 
Update2(state, context); + } + + private void DecodeBinarySymbol(PpmContext context) + { + PpmState state = context.FirstState; + int index1 = _probabilities[state.Frequency - 1]; + int index2 = _numberStatisticsToBinarySummaryIndex[context.Suffix.NumberStatistics] + _previousSuccess + + context.Flags + ((_runLength >> 26) & 0x20); + + if (_coder.RangeGetCurrentShiftCount(TOTAL_BIT_COUNT) < _binarySummary[index1, index2]) + { + _foundState = state; + state.Frequency += (byte)((state.Frequency < 196) ? 1 : 0); + _coder._lowCount = 0; + _coder._highCount = _binarySummary[index1, index2]; + _binarySummary[index1, index2] += + (ushort)(INTERVAL - Mean(_binarySummary[index1, index2], PERIOD_BIT_COUNT, 2)); + _previousSuccess = 1; + _runLength++; + } + else + { + _coder._lowCount = _binarySummary[index1, index2]; + _binarySummary[index1, index2] -= (ushort)Mean(_binarySummary[index1, index2], PERIOD_BIT_COUNT, 2); + _coder._highCount = BINARY_SCALE; + _initialEscape = EXPONENTIAL_ESCAPES[_binarySummary[index1, index2] >> 10]; + _characterMask[state.Symbol] = _escapeCount; + _previousSuccess = 0; + _numberMasked = 0; + _foundState = PpmState.ZERO; + } + } + + private void DecodeSymbol1(PpmContext context) + { + uint index; + uint count; + uint highCount = context.Statistics.Frequency; + PpmState state = context.Statistics; + _coder._scale = context.SummaryFrequency; + + count = _coder.RangeGetCurrentCount(); + if (count < highCount) + { + _coder._highCount = highCount; + _previousSuccess = (byte)((2 * _coder._highCount >= _coder._scale) ? 
1 : 0); + _foundState = state; + highCount += 4; + _foundState.Frequency = (byte)highCount; + context.SummaryFrequency += 4; + _runLength += _previousSuccess; + if (highCount > MAXIMUM_FREQUENCY) + { + Rescale(context); + } + _coder._lowCount = 0; + return; + } + + index = context.NumberStatistics; + _previousSuccess = 0; + while ((highCount += (++state).Frequency) <= count) + { + if (--index == 0) + { + _coder._lowCount = highCount; + _characterMask[state.Symbol] = _escapeCount; + _numberMasked = context.NumberStatistics; + index = context.NumberStatistics; + _foundState = PpmState.ZERO; + do + { + _characterMask[(--state).Symbol] = _escapeCount; + } + while (--index != 0); + _coder._highCount = _coder._scale; + return; + } + } + _coder._highCount = highCount; + _coder._lowCount = _coder._highCount - state.Frequency; + Update1(state, context); + } + + private void DecodeSymbol2(PpmContext context) + { + See2Context see2Context = MakeEscapeFrequency(context); + uint currentSymbol; + uint count; + uint highCount = 0; + uint index = (uint)(context.NumberStatistics - _numberMasked); + uint stateIndex = 0; + PpmState state = context.Statistics - 1; + + do + { + do + { + currentSymbol = state[1].Symbol; + state++; + } + while (_characterMask[currentSymbol] == _escapeCount); + highCount += state.Frequency; + _decodeStates[stateIndex++] = state; + + // note that decodeStates is a static array that is re-used on each call to this method (for performance reasons) + } + while (--index != 0); + + _coder._scale += highCount; + count = _coder.RangeGetCurrentCount(); + stateIndex = 0; + state = _decodeStates[stateIndex]; + if (count < highCount) + { + highCount = 0; + while ((highCount += state.Frequency) <= count) + { + state = _decodeStates[++stateIndex]; + } + _coder._highCount = highCount; + _coder._lowCount = _coder._highCount - state.Frequency; + see2Context.Update(); + Update2(state, context); + } + else + { + _coder._lowCount = highCount; + _coder._highCount = 
_coder._scale; + index = (uint)(context.NumberStatistics - _numberMasked); + _numberMasked = context.NumberStatistics; + do + { + _characterMask[_decodeStates[stateIndex].Symbol] = _escapeCount; + stateIndex++; + } + while (--index != 0); + see2Context._summary += (ushort)_coder._scale; + } + } + + private void Update1(PpmState state, PpmContext context) + { + _foundState = state; + _foundState.Frequency += 4; + context.SummaryFrequency += 4; + if (state[0].Frequency > state[-1].Frequency) + { + Swap(state[0], state[-1]); + _foundState = --state; + if (state.Frequency > MAXIMUM_FREQUENCY) + { + Rescale(context); + } + } + } + + private void Update2(PpmState state, PpmContext context) + { + _foundState = state; + _foundState.Frequency += 4; + context.SummaryFrequency += 4; + if (state.Frequency > MAXIMUM_FREQUENCY) + { + Rescale(context); + } + _escapeCount++; + _runLength = _initialRunLength; + } + + private See2Context MakeEscapeFrequency(PpmContext context) + { + uint numberStatistics = (uint)2 * context.NumberStatistics; + See2Context see2Context; + + if (context.NumberStatistics != 0xff) + { + // Note that context.Flags is always in the range 0 .. 28 (this ensures that the index used for the second + // dimension of the see2Contexts array is always in the range 0 .. 31). + + numberStatistics = context.Suffix.NumberStatistics; + int index1 = _probabilities[context.NumberStatistics + 2] - 3; + int index2 = ((context.SummaryFrequency > 11 * (context.NumberStatistics + 1)) ? 1 : 0) + + ((2 * context.NumberStatistics < numberStatistics + _numberMasked) ? 
2 : 0) + context.Flags; + see2Context = _see2Contexts[index1, index2]; + _coder._scale = see2Context.Mean(); + } + else + { + see2Context = _emptySee2Context; + _coder._scale = 1; + } + + return see2Context; + } + + private void Rescale(PpmContext context) + { + uint oldUnitCount; + int adder; + uint escapeFrequency; + uint index = context.NumberStatistics; + + byte localSymbol; + byte localFrequency; + PpmContext localSuccessor; + PpmState p1; + PpmState state; + + for (state = _foundState; state != context.Statistics; state--) + { + Swap(state[0], state[-1]); + } + + state.Frequency += 4; + context.SummaryFrequency += 4; + escapeFrequency = (uint)(context.SummaryFrequency - state.Frequency); + adder = (_orderFall != 0 || _method > ModelRestorationMethod.Freeze) ? 1 : 0; + state.Frequency = (byte)((state.Frequency + adder) >> 1); + context.SummaryFrequency = state.Frequency; + + do + { + escapeFrequency -= (++state).Frequency; + state.Frequency = (byte)((state.Frequency + adder) >> 1); + context.SummaryFrequency += state.Frequency; + if (state[0].Frequency > state[-1].Frequency) + { + p1 = state; + localSymbol = p1.Symbol; + localFrequency = p1.Frequency; + localSuccessor = p1.Successor; + do + { + Copy(p1[0], p1[-1]); + } + while (localFrequency > (--p1)[-1].Frequency); + p1.Symbol = localSymbol; + p1.Frequency = localFrequency; + p1.Successor = localSuccessor; + } + } + while (--index != 0); + + if (state.Frequency == 0) + { + do + { + index++; + } + while ((--state).Frequency == 0); + + escapeFrequency += index; + oldUnitCount = (uint)((context.NumberStatistics + 2) >> 1); + context.NumberStatistics -= (byte)index; + if (context.NumberStatistics == 0) + { + localSymbol = context.Statistics.Symbol; + localFrequency = context.Statistics.Frequency; + localSuccessor = context.Statistics.Successor; + localFrequency = (byte)((2 * localFrequency + escapeFrequency - 1) / escapeFrequency); + if (localFrequency > MAXIMUM_FREQUENCY / 3) + { + localFrequency = 
(byte)(MAXIMUM_FREQUENCY / 3); + } + _allocator.FreeUnits(context.Statistics, oldUnitCount); + context.FirstStateSymbol = localSymbol; + context.FirstStateFrequency = localFrequency; + context.FirstStateSuccessor = localSuccessor; + context.Flags = (byte)((context.Flags & 0x10) + ((localSymbol >= 0x40) ? 0x08 : 0x00)); + _foundState = context.FirstState; + return; + } + + context.Statistics = _allocator.ShrinkUnits(context.Statistics, oldUnitCount, + (uint)((context.NumberStatistics + 2) >> 1)); + context.Flags &= 0xf7; + index = context.NumberStatistics; + state = context.Statistics; + context.Flags |= (byte)((state.Symbol >= 0x40) ? 0x08 : 0x00); + do + { + context.Flags |= (byte)(((++state).Symbol >= 0x40) ? 0x08 : 0x00); + } + while (--index != 0); + } + + escapeFrequency -= (escapeFrequency >> 1); + context.SummaryFrequency += (ushort)escapeFrequency; + context.Flags |= 0x04; + _foundState = context.Statistics; + } + + private void Refresh(uint oldUnitCount, bool scale, PpmContext context) + { + int index = context.NumberStatistics; + int escapeFrequency; + int scaleValue = (scale ? 1 : 0); + + context.Statistics = _allocator.ShrinkUnits(context.Statistics, oldUnitCount, (uint)((index + 2) >> 1)); + PpmState statistics = context.Statistics; + context.Flags = + (byte)((context.Flags & (0x10 + (scale ? 0x04 : 0x00))) + ((statistics.Symbol >= 0x40) ? 0x08 : 0x00)); + escapeFrequency = context.SummaryFrequency - statistics.Frequency; + statistics.Frequency = (byte)((statistics.Frequency + scaleValue) >> scaleValue); + context.SummaryFrequency = statistics.Frequency; + + do + { + escapeFrequency -= (++statistics).Frequency; + statistics.Frequency = (byte)((statistics.Frequency + scaleValue) >> scaleValue); + context.SummaryFrequency += statistics.Frequency; + context.Flags |= (byte)((statistics.Symbol >= 0x40) ? 
0x08 : 0x00); + } + while (--index != 0); + + escapeFrequency = (escapeFrequency + scaleValue) >> scaleValue; + context.SummaryFrequency += (ushort)escapeFrequency; + } + + private PpmContext CutOff(int order, PpmContext context) + { + int index; + PpmState state; + + if (context.NumberStatistics == 0) + { + state = context.FirstState; + if ((Pointer)state.Successor >= _allocator._baseUnit) + { + if (order < _modelOrder) + { + state.Successor = CutOff(order + 1, state.Successor); + } + else + { + state.Successor = PpmContext.ZERO; + } + + if (state.Successor == PpmContext.ZERO && order > ORDER_BOUND) + { + _allocator.SpecialFreeUnits(context); + return PpmContext.ZERO; + } + + return context; + } + _allocator.SpecialFreeUnits(context); + return PpmContext.ZERO; + } + + uint unitCount = (uint)((context.NumberStatistics + 2) >> 1); + context.Statistics = _allocator.MoveUnitsUp(context.Statistics, unitCount); + index = context.NumberStatistics; + for (state = context.Statistics + index; state >= context.Statistics; state--) + { + if (state.Successor < _allocator._baseUnit) + { + state.Successor = PpmContext.ZERO; + Swap(state, context.Statistics[index--]); + } + else if (order < _modelOrder) + { + state.Successor = CutOff(order + 1, state.Successor); + } + else + { + state.Successor = PpmContext.ZERO; + } + } + + if (index != context.NumberStatistics && order != 0) + { + context.NumberStatistics = (byte)index; + state = context.Statistics; + if (index < 0) + { + _allocator.FreeUnits(state, unitCount); + _allocator.SpecialFreeUnits(context); + return PpmContext.ZERO; + } + if (index == 0) + { + context.Flags = (byte)((context.Flags & 0x10) + ((state.Symbol >= 0x40) ? 
0x08 : 0x00)); + Copy(context.FirstState, state); + _allocator.FreeUnits(state, unitCount); + context.FirstStateFrequency = (byte)((context.FirstStateFrequency + 11) >> 3); + } + else + { + Refresh(unitCount, context.SummaryFrequency > 16 * index, context); + } + } + + return context; + } + + private PpmContext RemoveBinaryContexts(int order, PpmContext context) + { + if (context.NumberStatistics == 0) + { + PpmState state = context.FirstState; + if ((Pointer)state.Successor >= _allocator._baseUnit && order < _modelOrder) + { + state.Successor = RemoveBinaryContexts(order + 1, state.Successor); + } + else + { + state.Successor = PpmContext.ZERO; + } + if ((state.Successor == PpmContext.ZERO) && + (context.Suffix.NumberStatistics == 0 || context.Suffix.Flags == 0xff)) + { + _allocator.FreeUnits(context, 1); + return PpmContext.ZERO; + } + return context; + } + + for (PpmState state = context.Statistics + context.NumberStatistics; state >= context.Statistics; state--) + { + if ((Pointer)state.Successor >= _allocator._baseUnit && order < _modelOrder) + { + state.Successor = RemoveBinaryContexts(order + 1, state.Successor); + } + else + { + state.Successor = PpmContext.ZERO; + } + } + + return context; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmState.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmState.cs new file mode 100644 index 0000000000..be5f752b13 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/PpmState.cs @@ -0,0 +1,197 @@ +#region Using + + + +#endregion + +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// PPM state. + /// + /// + /// + /// This must be a structure rather than a class because several places in the associated code assume that + /// is a value type (meaning that assignment creates a completely new copy of the + /// instance rather than just copying a reference to the same instance). 
+ /// + /// + /// Note that is a field rather than a property for performance reasons. + /// + /// + internal struct PpmState + { + public uint _address; + public byte[] _memory; + public static readonly PpmState ZERO = new PpmState(0, null); + public const int SIZE = 6; + + /// + /// Initializes a new instance of the structure. + /// + public PpmState(uint address, byte[] memory) + { + _address = address; + _memory = memory; + } + + /// + /// Gets or sets the symbol. + /// + public byte Symbol { get => _memory[_address]; set => _memory[_address] = value; } + + /// + /// Gets or sets the frequency. + /// + public byte Frequency { get => _memory[_address + 1]; set => _memory[_address + 1] = value; } + + /// + /// Gets or sets the successor. + /// + public Model.PpmContext Successor + { + get => new Model.PpmContext( + _memory[_address + 2] | ((uint)_memory[_address + 3]) << 8 | + ((uint)_memory[_address + 4]) << 16 | ((uint)_memory[_address + 5]) << 24, _memory); + set + { + _memory[_address + 2] = (byte)value._address; + _memory[_address + 3] = (byte)(value._address >> 8); + _memory[_address + 4] = (byte)(value._address >> 16); + _memory[_address + 5] = (byte)(value._address >> 24); + } + } + + /// + /// Gets the at the relative to this + /// . + /// + /// + /// + public PpmState this[int offset] => new PpmState((uint)(_address + offset * SIZE), _memory); + + /// + /// Allow a pointer to be implicitly converted to a PPM state. + /// + /// + /// + public static implicit operator PpmState(Pointer pointer) + { + return new PpmState(pointer._address, pointer._memory); + } + + /// + /// Allow pointer-like addition on a PPM state. + /// + /// + /// + /// + public static PpmState operator +(PpmState state, int offset) + { + state._address = (uint)(state._address + offset * SIZE); + return state; + } + + /// + /// Allow pointer-like incrementing on a PPM state. 
+ /// + /// + /// + public static PpmState operator ++(PpmState state) + { + state._address += SIZE; + return state; + } + + /// + /// Allow pointer-like subtraction on a PPM state. + /// + /// + /// + /// + public static PpmState operator -(PpmState state, int offset) + { + state._address = (uint)(state._address - offset * SIZE); + return state; + } + + /// + /// Allow pointer-like decrementing on a PPM state. + /// + /// + /// + public static PpmState operator --(PpmState state) + { + state._address -= SIZE; + return state; + } + + /// + /// Compare two PPM states. + /// + /// + /// + /// + public static bool operator <=(PpmState state1, PpmState state2) + { + return state1._address <= state2._address; + } + + /// + /// Compare two PPM states. + /// + /// + /// + /// + public static bool operator >=(PpmState state1, PpmState state2) + { + return state1._address >= state2._address; + } + + /// + /// Compare two PPM states. + /// + /// + /// + /// + public static bool operator ==(PpmState state1, PpmState state2) + { + return state1._address == state2._address; + } + + /// + /// Compare two PPM states. + /// + /// + /// + /// + public static bool operator !=(PpmState state1, PpmState state2) + { + return state1._address != state2._address; + } + + /// + /// Indicates whether this instance and a specified object are equal. + /// + /// true if obj and this instance are the same type and represent the same value; otherwise, false. + /// Another object to compare to. + public override bool Equals(object obj) + { + if (obj is PpmState) + { + PpmState state = (PpmState)obj; + return state._address == _address; + } + return base.Equals(obj); + } + + /// + /// Returns the hash code for this instance. + /// + /// A 32-bit signed integer that is the hash code for this instance. 
+ public override int GetHashCode() + { + return _address.GetHashCode(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/See2Context.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/See2Context.cs new file mode 100644 index 0000000000..3029d85bc1 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/I1/See2Context.cs @@ -0,0 +1,55 @@ +#region Using + + + +#endregion + +namespace SharpCompress.Compressors.PPMd.I1 +{ + /// + /// SEE2 (secondary escape estimation) contexts for PPM contexts with masked symbols. + /// + /// + /// + /// This must be a class rather than a structure because MakeEscapeFrequency returns a See2Context + /// instance from the see2Contexts array. The caller (for example, EncodeSymbol2) then updates the + /// returned See2Context instance and expects the updates to be reflected in the see2Contexts array. + /// This would not happen if this were a structure. + /// + /// + /// Note that in most cases fields are used rather than properties for performance reasons (for example, + /// is a field rather than a property). + /// + /// + internal class See2Context + { + private const byte PERIOD_BIT_COUNT = 7; + + public ushort _summary; + public byte _shift; + public byte _count; + + public void Initialize(uint initialValue) + { + _shift = PERIOD_BIT_COUNT - 4; + _summary = (ushort)(initialValue << _shift); + _count = 7; + } + + public uint Mean() + { + uint value = (uint)(_summary >> _shift); + _summary = (ushort)(_summary - value); + return (uint)(value + ((value == 0) ? 
1 : 0)); + } + + public void Update() + { + if (_shift < PERIOD_BIT_COUNT && --_count == 0) + { + _summary += _summary; + _count = (byte)(3 << _shift++); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdProperties.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdProperties.cs new file mode 100644 index 0000000000..2ad8d4d9d7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdProperties.cs @@ -0,0 +1,71 @@ +using SharpCompress.Compressors.PPMd.I1; +using SharpCompress.Converters; + +namespace SharpCompress.Compressors.PPMd +{ + public class PpmdProperties + { + + private int _allocatorSize; + internal Allocator _allocator; + + public PpmdProperties() + : this(16 << 20, 6) + { + } + + public PpmdProperties(int allocatorSize, int modelOrder) + : this(allocatorSize, modelOrder, ModelRestorationMethod.Restart) + { + } + + internal PpmdProperties(int allocatorSize, int modelOrder, ModelRestorationMethod modelRestorationMethod) + { + AllocatorSize = allocatorSize; + ModelOrder = modelOrder; + RestorationMethod = modelRestorationMethod; + } + + public int ModelOrder { get; } + public PpmdVersion Version { get; } = PpmdVersion.I1; + internal ModelRestorationMethod RestorationMethod { get; } + + public PpmdProperties(byte[] properties) + { + if (properties.Length == 2) + { + ushort props = DataConverter.LittleEndian.GetUInt16(properties, 0); + AllocatorSize = (((props >> 4) & 0xff) + 1) << 20; + ModelOrder = (props & 0x0f) + 1; + RestorationMethod = (ModelRestorationMethod)(props >> 12); + } + else if (properties.Length == 5) + { + Version = PpmdVersion.H7Z; + AllocatorSize = DataConverter.LittleEndian.GetInt32(properties, 1); + ModelOrder = properties[0]; + } + } + + public int AllocatorSize + { + get => _allocatorSize; + set + { + _allocatorSize = value; + if (Version == PpmdVersion.I1) + { + if (_allocator == null) + { + _allocator = new Allocator(); + } + 
_allocator.Start(_allocatorSize); + } + } + } + + public byte[] Properties => DataConverter.LittleEndian.GetBytes( + (ushort) + ((ModelOrder - 1) + (((AllocatorSize >> 20) - 1) << 4) + ((ushort)RestorationMethod << 12))); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdStream.cs new file mode 100644 index 0000000000..b9629d5855 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdStream.cs @@ -0,0 +1,142 @@ +using System; +using System.IO; +using SharpCompress.Compressors.LZMA.RangeCoder; +using SharpCompress.Compressors.PPMd.H; +using SharpCompress.Compressors.PPMd.I1; + +namespace SharpCompress.Compressors.PPMd +{ + public class PpmdStream : Stream + { + private readonly PpmdProperties _properties; + private readonly Stream _stream; + private readonly bool _compress; + private readonly Model _model; + private readonly ModelPpm _modelH; + private readonly Decoder _decoder; + private long _position; + private bool _isDisposed; + + public PpmdStream(PpmdProperties properties, Stream stream, bool compress) + { + _properties = properties; + _stream = stream; + _compress = compress; + + if (properties.Version == PpmdVersion.I1) + { + _model = new Model(); + if (compress) + { + _model.EncodeStart(properties); + } + else + { + _model.DecodeStart(stream, properties); + } + } + if (properties.Version == PpmdVersion.H) + { + _modelH = new ModelPpm(); + if (compress) + { + throw new NotImplementedException(); + } + _modelH.DecodeInit(stream, properties.ModelOrder, properties.AllocatorSize); + } + if (properties.Version == PpmdVersion.H7Z) + { + _modelH = new ModelPpm(); + if (compress) + { + throw new NotImplementedException(); + } + _modelH.DecodeInit(null, properties.ModelOrder, properties.AllocatorSize); + _decoder = new Decoder(); + _decoder.Init(stream); + } + } + + public override bool CanRead => !_compress; + + public 
override bool CanSeek => false; + + public override bool CanWrite => _compress; + + public override void Flush() + { + } + + protected override void Dispose(bool isDisposing) + { + if (_isDisposed) + { + return; + } + _isDisposed = true; + if (isDisposing) + { + if (_compress) + { + _model.EncodeBlock(_stream, new MemoryStream(), true); + } + } + base.Dispose(isDisposing); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => _position; set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + if (_compress) + { + return 0; + } + int size = 0; + if (_properties.Version == PpmdVersion.I1) + { + size = _model.DecodeBlock(_stream, buffer, offset, count); + } + if (_properties.Version == PpmdVersion.H) + { + int c; + while (size < count && (c = _modelH.DecodeChar()) >= 0) + { + buffer[offset++] = (byte)c; + size++; + } + } + if (_properties.Version == PpmdVersion.H7Z) + { + int c; + while (size < count && (c = _modelH.DecodeChar(_decoder)) >= 0) + { + buffer[offset++] = (byte)c; + size++; + } + } + _position += size; + return size; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + if (_compress) + { + _model.EncodeBlock(_stream, new MemoryStream(buffer, offset, count), false); + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdVersion.cs b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdVersion.cs new file mode 100644 index 0000000000..8ab1aa38b3 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/PPMd/PpmdVersion.cs @@ -0,0 +1,9 @@ +namespace SharpCompress.Compressors.PPMd +{ + public enum PpmdVersion + { + H, + H7Z, + I1 + } +} \ No 
newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/IRarUnpack.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/IRarUnpack.cs new file mode 100644 index 0000000000..0cc86a7840 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/IRarUnpack.cs @@ -0,0 +1,18 @@ +using System.IO; +using SharpCompress.Common.Rar.Headers; + +namespace SharpCompress.Compressors.Rar +{ + internal interface IRarUnpack + { + void DoUnpack(FileHeader fileHeader, Stream readStream, Stream writeStream); + void DoUnpack(); + + // eg u/i pause/resume button + bool Suspended { get; set; } + + long DestSize { get; } + int Char { get; } + int PpmEscChar { get; set; } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/MultiVolumeReadOnlyStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/MultiVolumeReadOnlyStream.cs new file mode 100644 index 0000000000..8bdfd64bc2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/MultiVolumeReadOnlyStream.cs @@ -0,0 +1,141 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common; +using SharpCompress.Common.Rar; + +namespace SharpCompress.Compressors.Rar +{ + internal class MultiVolumeReadOnlyStream : Stream + { + private long currentPosition; + private long maxPosition; + + private IEnumerator filePartEnumerator; + private Stream currentStream; + + private readonly IExtractionListener streamListener; + + private long currentPartTotalReadBytes; + private long currentEntryTotalReadBytes; + + internal MultiVolumeReadOnlyStream(IEnumerable parts, IExtractionListener streamListener) + { + this.streamListener = streamListener; + + filePartEnumerator = parts.GetEnumerator(); + filePartEnumerator.MoveNext(); + InitializeNextFilePart(); + } + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + if (disposing) + { + if (filePartEnumerator != null) + { + filePartEnumerator.Dispose(); + 
filePartEnumerator = null; + } + currentStream = null; + } + } + + private void InitializeNextFilePart() + { + maxPosition = filePartEnumerator.Current.FileHeader.CompressedSize; + currentPosition = 0; + currentStream = filePartEnumerator.Current.GetCompressedStream(); + + currentPartTotalReadBytes = 0; + + CurrentCrc = filePartEnumerator.Current.FileHeader.FileCrc; + + streamListener.FireFilePartExtractionBegin(filePartEnumerator.Current.FilePartName, + filePartEnumerator.Current.FileHeader.CompressedSize, + filePartEnumerator.Current.FileHeader.UncompressedSize); + } + + public override int Read(byte[] buffer, int offset, int count) + { + int totalRead = 0; + int currentOffset = offset; + int currentCount = count; + while (currentCount > 0) + { + int readSize = currentCount; + if (currentCount > maxPosition - currentPosition) + { + readSize = (int)(maxPosition - currentPosition); + } + + int read = currentStream.Read(buffer, currentOffset, readSize); + if (read < 0) + { + throw new EndOfStreamException(); + } + + currentPosition += read; + currentOffset += read; + currentCount -= read; + totalRead += read; + if (((maxPosition - currentPosition) == 0) + && filePartEnumerator.Current.FileHeader.IsSplitAfter) + { + if (filePartEnumerator.Current.FileHeader.R4Salt != null) + { + throw new InvalidFormatException("Sharpcompress currently does not support multi-volume decryption."); + } + string fileName = filePartEnumerator.Current.FileHeader.FileName; + if (!filePartEnumerator.MoveNext()) + { + throw new InvalidFormatException( + "Multi-part rar file is incomplete. 
Entry expects a new volume: " + fileName); + } + InitializeNextFilePart(); + } + else + { + break; + } + } + currentPartTotalReadBytes += totalRead; + currentEntryTotalReadBytes += totalRead; + streamListener.FireCompressedBytesRead(currentPartTotalReadBytes, currentEntryTotalReadBytes); + return totalRead; + } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public uint CurrentCrc { get; private set; } + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCRC.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCRC.cs new file mode 100644 index 0000000000..077b2f46b1 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCRC.cs @@ -0,0 +1,48 @@ +using System; + +namespace SharpCompress.Compressors.Rar +{ + internal static class RarCRC + { + private static readonly uint[] crcTab; + + public static uint CheckCrc(uint startCrc, byte b) { + return (crcTab[((int) ((int) startCrc ^ (int) b)) & 0xff] ^ (startCrc >> 8)); + } + + public static uint CheckCrc(uint startCrc, byte[] data, int offset, int count) + { + int size = Math.Min(data.Length - offset, count); + + for (int i = 0; i < size; i++) + { + startCrc = (crcTab[((int)startCrc ^ data[offset + i]) & 0xff] ^ (startCrc >> 8)); + } + return (startCrc); + } + + static RarCRC() + { + { + 
crcTab = new uint[256]; + for (uint i = 0; i < 256; i++) + { + uint c = i; + for (int j = 0; j < 8; j++) + { + if ((c & 1) != 0) + { + c = c >> 1; + c ^= 0xEDB88320; + } + else + { + c = c >> 1; + } + } + crcTab[i] = c; + } + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCrcStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCrcStream.cs new file mode 100644 index 0000000000..329587e724 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarCrcStream.cs @@ -0,0 +1,45 @@ +using System.IO; +using SharpCompress.Common; +using SharpCompress.Common.Rar.Headers; + +namespace SharpCompress.Compressors.Rar +{ + internal class RarCrcStream : RarStream + { + private readonly MultiVolumeReadOnlyStream readStream; + private uint currentCrc; + + public RarCrcStream(IRarUnpack unpack, FileHeader fileHeader, MultiVolumeReadOnlyStream readStream) + : base(unpack, fileHeader, readStream) + { + this.readStream = readStream; + ResetCrc(); + } + + public uint GetCrc() + { + return ~currentCrc; + } + + public void ResetCrc() + { + currentCrc = 0xffffffff; + } + + public override int Read(byte[] buffer, int offset, int count) + { + var result = base.Read(buffer, offset, count); + if (result != 0) + { + currentCrc = RarCRC.CheckCrc(currentCrc, buffer, offset, result); + } + else if (GetCrc() != readStream.CurrentCrc && count != 0) + { + // NOTE: we use the last FileHeader in a multipart volume to check CRC + throw new InvalidFormatException("file crc mismatch"); + } + + return result; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarStream.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarStream.cs new file mode 100644 index 0000000000..5d4f13fa82 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/RarStream.cs @@ -0,0 +1,129 @@ +using System; +using System.IO; +using SharpCompress.Common.Rar.Headers; + 
+namespace SharpCompress.Compressors.Rar +{ + internal class RarStream : Stream + { + private readonly IRarUnpack unpack; + private readonly FileHeader fileHeader; + private readonly Stream readStream; + + private bool fetch; + + private byte[] tmpBuffer = new byte[65536]; + private int tmpOffset; + private int tmpCount; + + private byte[] outBuffer; + private int outOffset; + private int outCount; + private int outTotal; + private bool isDisposed; + + public RarStream(IRarUnpack unpack, FileHeader fileHeader, Stream readStream) + { + this.unpack = unpack; + this.fileHeader = fileHeader; + this.readStream = readStream; + fetch = true; + unpack.DoUnpack(fileHeader, readStream, this); + fetch = false; + } + + protected override void Dispose(bool disposing) + { + if (!isDisposed) { + isDisposed = true; + base.Dispose(disposing); + readStream.Dispose(); + } + } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override void Flush() + { + } + + public override long Length => fileHeader.UncompressedSize; + + public override long Position { get => fileHeader.UncompressedSize - unpack.DestSize; set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + outTotal = 0; + if (tmpCount > 0) + { + int toCopy = tmpCount < count ? 
tmpCount : count; + Buffer.BlockCopy(tmpBuffer, tmpOffset, buffer, offset, toCopy); + tmpOffset += toCopy; + tmpCount -= toCopy; + offset += toCopy; + count -= toCopy; + outTotal += toCopy; + } + if (count > 0 && unpack.DestSize > 0) + { + outBuffer = buffer; + outOffset = offset; + outCount = count; + fetch = true; + unpack.DoUnpack(); + fetch = false; + } + return outTotal; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + if (!fetch) + { + throw new NotSupportedException(); + } + if (outCount > 0) + { + int toCopy = outCount < count ? outCount : count; + Buffer.BlockCopy(buffer, offset, outBuffer, outOffset, toCopy); + outOffset += toCopy; + outCount -= toCopy; + offset += toCopy; + count -= toCopy; + outTotal += toCopy; + } + if (count > 0) + { + if (tmpBuffer.Length < tmpCount + count) + { + byte[] newBuffer = + new byte[tmpBuffer.Length * 2 > tmpCount + count ? 
tmpBuffer.Length * 2 : tmpCount + count]; + Buffer.BlockCopy(tmpBuffer, 0, newBuffer, 0, tmpCount); + tmpBuffer = newBuffer; + } + Buffer.BlockCopy(buffer, offset, tmpBuffer, tmpCount, count); + tmpCount += count; + tmpOffset = 0; + unpack.Suspended = true; + } + else + { + unpack.Suspended = false; + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/AudioVariables.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/AudioVariables.cs new file mode 100644 index 0000000000..c251061751 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/AudioVariables.cs @@ -0,0 +1,26 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class AudioVariables + { + internal AudioVariables() + { + Dif = new int[11]; + } + + internal int[] Dif { get; } + internal int ByteCount { get; set; } + internal int D1 { get; set; } + + internal int D2 { get; set; } + internal int D3 { get; set; } + internal int D4 { get; set; } + + internal int K1 { get; set; } + internal int K2 { get; set; } + internal int K3 { get; set; } + internal int K4 { get; set; } + internal int K5 { get; set; } + internal int LastChar { get; set; } + internal int LastDelta { get; set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/BitDecode.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/BitDecode.cs new file mode 100644 index 0000000000..a8d499b5e6 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/BitDecode.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class BitDecode : Decode + { + internal BitDecode() + : base(new int[PackDef.BC]) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/CodeType.cs 
b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/CodeType.cs new file mode 100644 index 0000000000..e9703ff155 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/CodeType.cs @@ -0,0 +1,15 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal enum CodeType + { + CODE_HUFFMAN, + CODE_LZ, + CODE_LZ2, + CODE_REPEATLZ, + CODE_CACHELZ, + CODE_STARTFILE, + CODE_ENDFILE, + CODE_VM, + CODE_VMDATA + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/Decode.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/Decode.cs new file mode 100644 index 0000000000..ceed74de4d --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/Decode.cs @@ -0,0 +1,34 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class Decode + { + internal Decode() + : this(new int[2]) + { + } + + protected Decode(int[] customDecodeNum) + { + DecodeLen = new int[16]; + DecodePos = new int[16]; + DecodeNum = customDecodeNum; + } + + /// returns the decode Length array + /// decodeLength + /// + internal int[] DecodeLen { get; } + + /// returns the decode num array + /// decodeNum + /// + internal int[] DecodeNum { get; } + + /// returns the decodePos array + /// decodePos + /// + internal int[] DecodePos { get; } + + internal int MaxNum { get; set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/DistDecode.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/DistDecode.cs new file mode 100644 index 0000000000..1a0b666b87 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/DistDecode.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class DistDecode : Decode + { + internal DistDecode() + : base(new int[PackDef.DC]) + { + } + } +} \ No 
newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/FilterType.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/FilterType.cs new file mode 100644 index 0000000000..6d915e447f --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/FilterType.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal enum FilterType : byte + { + // These values must not be changed, because we use them directly + // in RAR5 compression and decompression code. + FILTER_DELTA=0, FILTER_E8, FILTER_E8E9, FILTER_ARM, + FILTER_AUDIO, FILTER_RGB, FILTER_ITANIUM, FILTER_PPM, FILTER_NONE + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LitDecode.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LitDecode.cs new file mode 100644 index 0000000000..8316c261e5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LitDecode.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class LitDecode : Decode + { + internal LitDecode() + : base(new int[PackDef.NC]) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LowDistDecode.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LowDistDecode.cs new file mode 100644 index 0000000000..a2095bd42c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/LowDistDecode.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class LowDistDecode : Decode + { + internal LowDistDecode() + : base(new int[PackDef.LDC]) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/MultDecode.cs 
b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/MultDecode.cs new file mode 100644 index 0000000000..e6df223c97 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/MultDecode.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class MultDecode : Decode + { + internal MultDecode() + : base(new int[PackDef.MC20]) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/PackDef.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/PackDef.cs new file mode 100644 index 0000000000..87f1977ee2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/PackDef.cs @@ -0,0 +1,48 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal static class PackDef + { + // 20171217 NOTE: these contants are gone from unrar src code + // seems to be more dynamic + public const int MAXWINSIZE = 0x400000; + public const int MAXWINMASK = MAXWINSIZE - 1; + + public const uint MAX_LZ_MATCH = 0x1001; + public const uint MAX3_LZ_MATCH = 0x101; // Maximum match length for RAR v3. + public const int LOW_DIST_REP_COUNT = 16; + + public const int NC = 299; /* alphabet = {0, 1, 2, ..., NC - 1} */ + public const int DC = 60; + public const int LDC = 17; + public const int RC = 28; +// 20171217: NOTE: these constants seem to have been updated in the unrar src code +// at some unknown point. updating causes decompression failure, not sure why. 
+// public const int NC = 306; /* alphabet = {0, 1, 2, ..., NC - 1} */ +// public const int DC = 64; +// public const int LDC = 16; +// public const int RC = 44; + public const int HUFF_TABLE_SIZE = NC + DC + RC + LDC; + public const int BC = 20; + + public const uint NC30 = 299; /* alphabet = {0, 1, 2, ..., NC - 1} */ + public const uint DC30 = 60; + public const uint LDC30 = 17; + public const uint RC30 = 28; + public const uint BC30 = 20; + public const uint HUFF_TABLE_SIZE30 = NC30 + DC30 + RC30 + LDC30; + + public const int NC20 = 298; /* alphabet = {0, 1, 2, ..., NC - 1} */ + public const int DC20 = 48; + public const int RC20 = 28; + public const int BC20 = 19; + public const int MC20 = 257; + + // Largest alphabet size among all values listed above. + public const uint LARGEST_TABLE_SIZE = 306; + + //public enum { + // CODE_HUFFMAN, CODE_LZ, CODE_REPEATLZ, CODE_CACHELZ, CODE_STARTFILE, + // CODE_ENDFILE, CODE_FILTER, CODE_FILTERDATA + //} + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/RepDecode.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/RepDecode.cs new file mode 100644 index 0000000000..24d72d1646 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Decode/RepDecode.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.Decode +{ + internal class RepDecode : Decode + { + internal RepDecode() + : base(new int[PackDef.RC]) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/PPM/BlockTypes.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/PPM/BlockTypes.cs new file mode 100644 index 0000000000..b3f79fdcff --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/PPM/BlockTypes.cs @@ -0,0 +1,8 @@ +namespace SharpCompress.Compressors.Rar.UnpackV1.PPM +{ + internal enum BlockTypes + { + BLOCK_LZ = 0, + BLOCK_PPM = 1 + } 
+} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack.cs new file mode 100644 index 0000000000..ca81b905d7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack.cs @@ -0,0 +1,1270 @@ +using System; +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common; +using SharpCompress.Common.Rar.Headers; +using SharpCompress.Compressors.PPMd.H; +using SharpCompress.Compressors.Rar.UnpackV1.Decode; +using SharpCompress.Compressors.Rar.UnpackV1.PPM; +using SharpCompress.Compressors.Rar.VM; + +namespace SharpCompress.Compressors.Rar.UnpackV1 +{ + internal sealed partial class Unpack : BitInput, IRarUnpack + { + private readonly BitInput Inp; + + public Unpack() { + // to ease in porting Unpack50.cs + Inp = this; + } + + + public bool FileExtracted { get; private set; } + + public long DestSize + { + get => destUnpSize; + set + { + destUnpSize = value; + FileExtracted = false; + } + } + + public bool Suspended { + get => suspended; + set => suspended = value; + } + + public int Char + { + get + { + if (inAddr > MAX_SIZE - 30) + { + unpReadBuf(); + } + return (InBuf[inAddr++] & 0xff); + } + } + + public int PpmEscChar { get; set; } + + private readonly ModelPpm ppm = new ModelPpm(); + + private readonly RarVM rarVM = new RarVM(); + + // Filters code, one entry per filter + private readonly List filters = new List(); + + // Filters stack, several entrances of same filter are possible + private readonly List prgStack = new List(); + + // lengths of preceding blocks, one length per filter. 
Used to reduce size + // required to write block length if lengths are repeating + private readonly List oldFilterLengths = new List(); + + private int lastFilter; + + private bool tablesRead; + + private readonly byte[] unpOldTable = new byte[PackDef.HUFF_TABLE_SIZE]; + + private BlockTypes unpBlockType; + + //private bool externalWindow; + + private long writtenFileSize; + + private bool ppmError; + + private int prevLowDist; + + private int lowDistRepCount; + + private static readonly int[] DBitLengthCounts = {4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 14, 0, 12}; + + private FileHeader fileHeader; + + private void Init(byte[] window) + { + if (window == null) + { + this.window = new byte[PackDef.MAXWINSIZE]; + } + else + { + this.window = window; + + //externalWindow = true; + } + inAddr = 0; + UnpInitData(false); + } + + public void DoUnpack(FileHeader fileHeader, Stream readStream, Stream writeStream) + { + destUnpSize = fileHeader.UncompressedSize; + this.fileHeader = fileHeader; + this.readStream = readStream; + this.writeStream = writeStream; + if (!fileHeader.IsSolid) + { + Init(null); + } + suspended = false; + DoUnpack(); + } + + public void DoUnpack() + { + if (fileHeader.CompressionMethod == 0) + { + UnstoreFile(); + return; + } + switch (fileHeader.CompressionAlgorithm) + { + case 15: // rar 1.5 compression + unpack15(fileHeader.IsSolid); + break; + + case 20: // rar 2.x compression + case 26: // files larger than 2GB + unpack20(fileHeader.IsSolid); + break; + + case 29: // rar 3.x compression + case 36: // alternative hash + Unpack29(fileHeader.IsSolid); + break; + + case 50: // rar 5.x compression + Unpack5(fileHeader.IsSolid); + break; + + default: + throw new InvalidFormatException("unknown rar compression version " + fileHeader.CompressionAlgorithm); + } + } + + private void UnstoreFile() + { + byte[] buffer = new byte[0x10000]; + while (true) + { + int code = readStream.Read(buffer, 0, (int)Math.Min(buffer.Length, destUnpSize)); + if (code 
== 0 || code == -1) + { + break; + } + code = code < destUnpSize ? code : (int)destUnpSize; + writeStream.Write(buffer, 0, code); + if (destUnpSize >= 0) + { + destUnpSize -= code; + } + if (suspended) + { + return; + } + } + } + + private void Unpack29(bool solid) + { + int[] DDecode = new int[PackDef.DC]; + byte[] DBits = new byte[PackDef.DC]; + + int Bits; + + if (DDecode[1] == 0) + { + int Dist = 0, BitLength = 0, Slot = 0; + for (int I = 0; I < DBitLengthCounts.Length; I++, BitLength++) + { + int count = DBitLengthCounts[I]; + for (int J = 0; J < count; J++, Slot++, Dist += (1 << BitLength)) + { + DDecode[Slot] = Dist; + DBits[Slot] = (byte)BitLength; + } + } + } + + FileExtracted = true; + + if (!suspended) + { + UnpInitData(solid); + if (!unpReadBuf()) + { + return; + } + if ((!solid || !tablesRead) && !ReadTables()) + { + return; + } + } + + if (ppmError) + { + return; + } + + while (true) + { + unpPtr &= PackDef.MAXWINMASK; + + if (inAddr > readBorder) + { + if (!unpReadBuf()) + { + break; + } + } + + // System.out.println(((wrPtr - unpPtr) & + // Compress.MAXWINMASK)+":"+wrPtr+":"+unpPtr); + if (((wrPtr - unpPtr) & PackDef.MAXWINMASK) < 260 && wrPtr != unpPtr) + { + UnpWriteBuf(); + if (destUnpSize <= 0) + { + return; + } + if (suspended) + { + FileExtracted = false; + return; + } + } + if (unpBlockType == BlockTypes.BLOCK_PPM) + { + int Ch = ppm.DecodeChar(); + if (Ch == -1) + { + ppmError = true; + break; + } + if (Ch == PpmEscChar) + { + int NextCh = ppm.DecodeChar(); + if (NextCh == 0) + { + if (!ReadTables()) + { + break; + } + continue; + } + if (NextCh == 2 || NextCh == -1) + { + break; + } + if (NextCh == 3) + { + if (!ReadVMCodePPM()) + { + break; + } + continue; + } + if (NextCh == 4) + { + int Distance = 0, Length = 0; + bool failed = false; + for (int I = 0; I < 4 && !failed; I++) + { + int ch = ppm.DecodeChar(); + if (ch == -1) + { + failed = true; + } + else + { + if (I == 3) + { + // Bug fixed + Length = ch & 0xff; + } + else + { + // Bug 
fixed + Distance = (Distance << 8) + (ch & 0xff); + } + } + } + if (failed) + { + break; + } + CopyString(Length + 32, Distance + 2); + continue; + } + if (NextCh == 5) + { + int Length = ppm.DecodeChar(); + if (Length == -1) + { + break; + } + CopyString(Length + 4, 1); + continue; + } + } + window[unpPtr++] = (byte)Ch; + continue; + } + + int Number = this.decodeNumber(LD); + if (Number < 256) + { + window[unpPtr++] = (byte)Number; + continue; + } + if (Number >= 271) + { + int Length = LDecode[Number -= 271] + 3; + if ((Bits = LBits[Number]) > 0) + { + Length += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + + int DistNumber = this.decodeNumber(DD); + int Distance = DDecode[DistNumber] + 1; + if ((Bits = DBits[DistNumber]) > 0) + { + if (DistNumber > 9) + { + if (Bits > 4) + { + Distance += ((Utility.URShift(GetBits(), (20 - Bits))) << 4); + AddBits(Bits - 4); + } + if (lowDistRepCount > 0) + { + lowDistRepCount--; + Distance += prevLowDist; + } + else + { + int LowDist = this.decodeNumber(LDD); + if (LowDist == 16) + { + lowDistRepCount = PackDef.LOW_DIST_REP_COUNT - 1; + Distance += prevLowDist; + } + else + { + Distance += LowDist; + prevLowDist = LowDist; + } + } + } + else + { + Distance += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + } + + if (Distance >= 0x2000) + { + Length++; + if (Distance >= 0x40000L) + { + Length++; + } + } + + InsertOldDist(Distance); + InsertLastMatch(Length, Distance); + + CopyString(Length, Distance); + continue; + } + if (Number == 256) + { + if (!ReadEndOfBlock()) + { + break; + } + continue; + } + if (Number == 257) + { + if (!ReadVMCode()) + { + break; + } + continue; + } + if (Number == 258) + { + if (lastLength != 0) + { + CopyString(lastLength, lastDist); + } + continue; + } + if (Number < 263) + { + int DistNum = Number - 259; + int Distance = oldDist[DistNum]; + for (int I = DistNum; I > 0; I--) + { + oldDist[I] = oldDist[I - 1]; + } + oldDist[0] = Distance; + + int LengthNumber = 
this.decodeNumber(RD); + int Length = LDecode[LengthNumber] + 2; + if ((Bits = LBits[LengthNumber]) > 0) + { + Length += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + InsertLastMatch(Length, Distance); + CopyString(Length, Distance); + continue; + } + if (Number < 272) + { + int Distance = SDDecode[Number -= 263] + 1; + if ((Bits = SDBits[Number]) > 0) + { + Distance += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + InsertOldDist(Distance); + InsertLastMatch(2, Distance); + CopyString(2, Distance); + } + } + UnpWriteBuf(); + } + + private void UnpWriteBuf() + { + int WrittenBorder = wrPtr; + int WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK; + for (int I = 0; I < prgStack.Count; I++) + { + UnpackFilter flt = prgStack[I]; + if (flt == null) + { + continue; + } + if (flt.NextWindow) + { + flt.NextWindow = false; // ->NextWindow=false; + continue; + } + int BlockStart = flt.BlockStart; // ->BlockStart; + int BlockLength = flt.BlockLength; // ->BlockLength; + if (((BlockStart - WrittenBorder) & PackDef.MAXWINMASK) < WriteSize) + { + if (WrittenBorder != BlockStart) + { + UnpWriteArea(WrittenBorder, BlockStart); + WrittenBorder = BlockStart; + WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK; + } + if (BlockLength <= WriteSize) + { + int BlockEnd = (BlockStart + BlockLength) & PackDef.MAXWINMASK; + if (BlockStart < BlockEnd || BlockEnd == 0) + { + // VM.SetMemory(0,Window+BlockStart,BlockLength); + rarVM.setMemory(0, window, BlockStart, BlockLength); + } + else + { + int FirstPartLength = PackDef.MAXWINSIZE - BlockStart; + + // VM.SetMemory(0,Window+BlockStart,FirstPartLength); + rarVM.setMemory(0, window, BlockStart, FirstPartLength); + + // VM.SetMemory(FirstPartLength,Window,BlockEnd); + rarVM.setMemory(FirstPartLength, window, 0, BlockEnd); + } + + VMPreparedProgram ParentPrg = filters[flt.ParentFilter].Program; + VMPreparedProgram Prg = flt.Program; + + if (ParentPrg.GlobalData.Count > 
RarVM.VM_FIXEDGLOBALSIZE) + { + // copy global data from previous script execution if + // any + // Prg->GlobalData.Alloc(ParentPrg->GlobalData.Size()); + // memcpy(&Prg->GlobalData[VM_FIXEDGLOBALSIZE],&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],ParentPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE); + Prg.GlobalData.Clear(); + for (int i = 0; i < ParentPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE; i++) + { + Prg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = + ParentPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i]; + } + } + + ExecuteCode(Prg); + + if (Prg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE) + { + // save global data for next script execution + if (ParentPrg.GlobalData.Count < Prg.GlobalData.Count) + { + //ParentPrg.GlobalData.Clear(); // ->GlobalData.Alloc(Prg->GlobalData.Size()); + ParentPrg.GlobalData.SetSize(Prg.GlobalData.Count); + } + + // memcpy(&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],&Prg->GlobalData[VM_FIXEDGLOBALSIZE],Prg->GlobalData.Size()-VM_FIXEDGLOBALSIZE); + for (int i = 0; i < Prg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE; i++) + { + ParentPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = + Prg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i]; + } + } + else + { + ParentPrg.GlobalData.Clear(); + } + + int FilteredDataOffset = Prg.FilteredDataOffset; + int FilteredDataSize = Prg.FilteredDataSize; + byte[] FilteredData = new byte[FilteredDataSize]; + + for (int i = 0; i < FilteredDataSize; i++) + { + FilteredData[i] = rarVM.Mem[FilteredDataOffset + i]; + + // Prg.GlobalData.get(FilteredDataOffset + // + + // i); + } + + prgStack[I] = null; + while (I + 1 < prgStack.Count) + { + UnpackFilter NextFilter = prgStack[I + 1]; + if (NextFilter == null || NextFilter.BlockStart != BlockStart || + NextFilter.BlockLength != FilteredDataSize || NextFilter.NextWindow) + { + break; + } + + // apply several filters to same data block + + rarVM.setMemory(0, FilteredData, 0, FilteredDataSize); + + // .SetMemory(0,FilteredData,FilteredDataSize); + + VMPreparedProgram pPrg = 
filters[NextFilter.ParentFilter].Program; + VMPreparedProgram NextPrg = NextFilter.Program; + + if (pPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE) + { + // copy global data from previous script execution + // if any + // NextPrg->GlobalData.Alloc(ParentPrg->GlobalData.Size()); + NextPrg.GlobalData.SetSize(pPrg.GlobalData.Count); + + // memcpy(&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],ParentPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE); + for (int i = 0; i < pPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE; i++) + { + NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = + pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i]; + } + } + + ExecuteCode(NextPrg); + + if (NextPrg.GlobalData.Count > RarVM.VM_FIXEDGLOBALSIZE) + { + // save global data for next script execution + if (pPrg.GlobalData.Count < NextPrg.GlobalData.Count) + { + pPrg.GlobalData.SetSize(NextPrg.GlobalData.Count); + } + + // memcpy(&ParentPrg->GlobalData[VM_FIXEDGLOBALSIZE],&NextPrg->GlobalData[VM_FIXEDGLOBALSIZE],NextPrg->GlobalData.Size()-VM_FIXEDGLOBALSIZE); + for (int i = 0; i < NextPrg.GlobalData.Count - RarVM.VM_FIXEDGLOBALSIZE; i++) + { + pPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i] = + NextPrg.GlobalData[RarVM.VM_FIXEDGLOBALSIZE + i]; + } + } + else + { + pPrg.GlobalData.Clear(); + } + FilteredDataOffset = NextPrg.FilteredDataOffset; + FilteredDataSize = NextPrg.FilteredDataSize; + + FilteredData = new byte[FilteredDataSize]; + for (int i = 0; i < FilteredDataSize; i++) + { + FilteredData[i] = NextPrg.GlobalData[FilteredDataOffset + i]; + } + + I++; + prgStack[I] = null; + } + writeStream.Write(FilteredData, 0, FilteredDataSize); + unpSomeRead = true; + writtenFileSize += FilteredDataSize; + destUnpSize -= FilteredDataSize; + WrittenBorder = BlockEnd; + WriteSize = (unpPtr - WrittenBorder) & PackDef.MAXWINMASK; + } + else + { + for (int J = I; J < prgStack.Count; J++) + { + UnpackFilter filt = prgStack[J]; + if (filt != null && filt.NextWindow) + { + 
filt.NextWindow = false; + } + } + wrPtr = WrittenBorder; + return; + } + } + } + + UnpWriteArea(WrittenBorder, unpPtr); + wrPtr = unpPtr; + } + + private void UnpWriteArea(int startPtr, int endPtr) + { + if (endPtr != startPtr) + { + unpSomeRead = true; + } + if (endPtr < startPtr) + { + UnpWriteData(window, startPtr, -startPtr & PackDef.MAXWINMASK); + UnpWriteData(window, 0, endPtr); + unpAllBuf = true; + } + else + { + UnpWriteData(window, startPtr, endPtr - startPtr); + } + } + + private void UnpWriteData(byte[] data, int offset, int size) + { + if (destUnpSize <= 0) + { + return; + } + int writeSize = size; + if (writeSize > destUnpSize) + { + writeSize = (int)destUnpSize; + } + writeStream.Write(data, offset, writeSize); + + writtenFileSize += size; + destUnpSize -= size; + } + + private void InsertOldDist(uint distance) { + // TODO uint + InsertOldDist((int)distance); + } + + private void InsertOldDist(int distance) + { + oldDist[3] = oldDist[2]; + oldDist[2] = oldDist[1]; + oldDist[1] = oldDist[0]; + oldDist[0] = distance; + } + + private void InsertLastMatch(int length, int distance) + { + lastDist = distance; + lastLength = length; + } + + private void CopyString(uint length, uint distance) { + // TODO uint + CopyString((int)length, (int)distance) ; + } + + private void CopyString(int length, int distance) + { + // System.out.println("copyString(" + length + ", " + distance + ")"); + + int destPtr = unpPtr - distance; + + // System.out.println(unpPtr+":"+distance); + if (destPtr >= 0 && destPtr < PackDef.MAXWINSIZE - 260 && unpPtr < PackDef.MAXWINSIZE - 260) + { + window[unpPtr++] = window[destPtr++]; + + while (--length > 0) + { + window[unpPtr++] = window[destPtr++]; + } + } + else + { + while (length-- != 0) + { + window[unpPtr] = window[destPtr++ & PackDef.MAXWINMASK]; + unpPtr = (unpPtr + 1) & PackDef.MAXWINMASK; + } + } + } + + private void UnpInitData(bool solid) + { + if (!solid) + { + tablesRead = false; + Utility.Fill(oldDist, 0); // 
memset(oldDist,0,sizeof(OldDist)); + + oldDistPtr = 0; + lastDist = 0; + lastLength = 0; + + Utility.Fill(unpOldTable, (byte)0); // memset(UnpOldTable,0,sizeof(UnpOldTable)); + + unpPtr = 0; + wrPtr = 0; + PpmEscChar = 2; +WriteBorder=Math.Min(MaxWinSize,UNPACK_MAX_WRITE)&MaxWinMask; + + InitFilters(); + } + InitBitInput(); + ppmError = false; + writtenFileSize = 0; + readTop = 0; + readBorder = 0; + unpInitData20(solid); + } + +//void Unpack::UnpInitData(bool Solid) +//{ +// if (!Solid) +// { +// memset(OldDist,0,sizeof(OldDist)); +// OldDistPtr=0; +// LastDist=LastLength=0; +//// memset(Window,0,MaxWinSize); +// memset(&BlockTables,0,sizeof(BlockTables)); +// UnpPtr=WrPtr=0; +// WriteBorder=Min(MaxWinSize,UNPACK_MAX_WRITE)&MaxWinMask; +// } +// // Filters never share several solid files, so we can safely reset them +// // even in solid archive. +// InitFilters(); +// +// Inp.InitBitInput(); +// WrittenFileSize=0; +// ReadTop=0; +// ReadBorder=0; +// +// memset(&BlockHeader,0,sizeof(BlockHeader)); +// BlockHeader.BlockSize=-1; // '-1' means not defined yet. 
+//#ifndef SFX_MODULE +// UnpInitData20(Solid); +//#endif +// UnpInitData30(Solid); +// UnpInitData50(Solid); +//} + + private void InitFilters() + { + oldFilterLengths.Clear(); + lastFilter = 0; + + filters.Clear(); + + prgStack.Clear(); + } + + private bool ReadEndOfBlock() + { + int BitField = GetBits(); + bool NewTable, NewFile = false; + if ((BitField & 0x8000) != 0) + { + NewTable = true; + AddBits(1); + } + else + { + NewFile = true; + NewTable = (BitField & 0x4000) != 0; + AddBits(2); + } + tablesRead = !NewTable; + return !(NewFile || NewTable && !ReadTables()); + } + + private bool ReadTables() + { + byte[] bitLength = new byte[PackDef.BC]; + + byte[] table = new byte[PackDef.HUFF_TABLE_SIZE]; + if (inAddr > readTop - 25) + { + if (!unpReadBuf()) + { + return (false); + } + } + AddBits((8 - inBit) & 7); + long bitField = GetBits() & unchecked((int)0xffFFffFF); + if ((bitField & 0x8000) != 0) + { + unpBlockType = BlockTypes.BLOCK_PPM; + return (ppm.DecodeInit(this, PpmEscChar)); + } + unpBlockType = BlockTypes.BLOCK_LZ; + + prevLowDist = 0; + lowDistRepCount = 0; + + if ((bitField & 0x4000) == 0) + { + Utility.Fill(unpOldTable, (byte)0); // memset(UnpOldTable,0,sizeof(UnpOldTable)); + } + AddBits(2); + + for (int i = 0; i < PackDef.BC; i++) + { + int length = (Utility.URShift(GetBits(), 12)) & 0xFF; + AddBits(4); + if (length == 15) + { + int zeroCount = (Utility.URShift(GetBits(), 12)) & 0xFF; + AddBits(4); + if (zeroCount == 0) + { + bitLength[i] = 15; + } + else + { + zeroCount += 2; + while (zeroCount-- > 0 && i < bitLength.Length) + { + bitLength[i++] = 0; + } + i--; + } + } + else + { + bitLength[i] = (byte)length; + } + } + + UnpackUtility.makeDecodeTables(bitLength, 0, BD, PackDef.BC); + + int TableSize = PackDef.HUFF_TABLE_SIZE; + + for (int i = 0; i < TableSize;) + { + if (inAddr > readTop - 5) + { + if (!unpReadBuf()) + { + return (false); + } + } + int Number = this.decodeNumber(BD); + if (Number < 16) + { + table[i] = (byte)((Number + 
unpOldTable[i]) & 0xf); + i++; + } + else if (Number < 18) + { + int N; + if (Number == 16) + { + N = (Utility.URShift(GetBits(), 13)) + 3; + AddBits(3); + } + else + { + N = (Utility.URShift(GetBits(), 9)) + 11; + AddBits(7); + } + while (N-- > 0 && i < TableSize) + { + table[i] = table[i - 1]; + i++; + } + } + else + { + int N; + if (Number == 18) + { + N = (Utility.URShift(GetBits(), 13)) + 3; + AddBits(3); + } + else + { + N = (Utility.URShift(GetBits(), 9)) + 11; + AddBits(7); + } + while (N-- > 0 && i < TableSize) + { + table[i++] = 0; + } + } + } + tablesRead = true; + if (inAddr > readTop) + { + return (false); + } + UnpackUtility.makeDecodeTables(table, 0, LD, PackDef.NC); + UnpackUtility.makeDecodeTables(table, PackDef.NC, DD, PackDef.DC); + UnpackUtility.makeDecodeTables(table, PackDef.NC + PackDef.DC, LDD, PackDef.LDC); + UnpackUtility.makeDecodeTables(table, PackDef.NC + PackDef.DC + PackDef.LDC, RD, PackDef.RC); + + // memcpy(unpOldTable,table,sizeof(unpOldTable)); + + Buffer.BlockCopy(table, 0, unpOldTable, 0, unpOldTable.Length); + return (true); + } + + private bool ReadVMCode() + { + int FirstByte = GetBits() >> 8; + AddBits(8); + int Length = (FirstByte & 7) + 1; + if (Length == 7) + { + Length = (GetBits() >> 8) + 7; + AddBits(8); + } + else if (Length == 8) + { + Length = GetBits(); + AddBits(16); + } + + List vmCode = new List(); + for (int I = 0; I < Length; I++) + { + if (inAddr >= readTop - 1 && !unpReadBuf() && I < Length - 1) + { + return (false); + } + vmCode.Add((byte)(GetBits() >> 8)); + AddBits(8); + } + return (AddVMCode(FirstByte, vmCode, Length)); + } + + private bool ReadVMCodePPM() + { + int FirstByte = ppm.DecodeChar(); + if (FirstByte == -1) + { + return (false); + } + int Length = (FirstByte & 7) + 1; + if (Length == 7) + { + int B1 = ppm.DecodeChar(); + if (B1 == -1) + { + return (false); + } + Length = B1 + 7; + } + else if (Length == 8) + { + int B1 = ppm.DecodeChar(); + if (B1 == -1) + { + return (false); + } + int B2 = 
ppm.DecodeChar(); + if (B2 == -1) + { + return (false); + } + Length = B1 * 256 + B2; + } + + List vmCode = new List(); + for (int I = 0; I < Length; I++) + { + int Ch = ppm.DecodeChar(); + if (Ch == -1) + { + return (false); + } + vmCode.Add((byte)Ch); // VMCode[I]=Ch; + } + return (AddVMCode(FirstByte, vmCode, Length)); + } + + private bool AddVMCode(int firstByte, List vmCode, int length) + { + BitInput Inp = new BitInput(); + Inp.InitBitInput(); + + // memcpy(Inp.InBuf,Code,Min(BitInput::MAX_SIZE,CodeSize)); + for (int i = 0; i < Math.Min(MAX_SIZE, vmCode.Count); i++) + { + Inp.InBuf[i] = vmCode[i]; + } + rarVM.init(); + + int FiltPos; + if ((firstByte & 0x80) != 0) + { + FiltPos = RarVM.ReadData(Inp); + if (FiltPos == 0) + { + InitFilters(); + } + else + { + FiltPos--; + } + } + else + { + FiltPos = lastFilter; // use the same filter as last time + } + + if (FiltPos > filters.Count || FiltPos > oldFilterLengths.Count) + { + return (false); + } + lastFilter = FiltPos; + bool NewFilter = (FiltPos == filters.Count); + + UnpackFilter StackFilter = new UnpackFilter(); // new filter for + + // PrgStack + + UnpackFilter Filter; + if (NewFilter) + + // new filter code, never used before since VM reset + { + // too many different filters, corrupt archive + if (FiltPos > 1024) + { + return (false); + } + + // Filters[Filters.Size()-1]=Filter=new UnpackFilter; + Filter = new UnpackFilter(); + filters.Add(Filter); + StackFilter.ParentFilter = filters.Count - 1; + oldFilterLengths.Add(0); + Filter.ExecCount = 0; + } + + // filter was used in the past + else + { + Filter = filters[FiltPos]; + StackFilter.ParentFilter = FiltPos; + Filter.ExecCount = Filter.ExecCount + 1; // ->ExecCount++; + } + + prgStack.Add(StackFilter); + StackFilter.ExecCount = Filter.ExecCount; // ->ExecCount; + + int BlockStart = RarVM.ReadData(Inp); + if ((firstByte & 0x40) != 0) + { + BlockStart += 258; + } + StackFilter.BlockStart = ((BlockStart + unpPtr) & PackDef.MAXWINMASK); + if ((firstByte & 
0x20) != 0) + { + StackFilter.BlockLength = RarVM.ReadData(Inp); + } + else + { + StackFilter.BlockLength = FiltPos < oldFilterLengths.Count ? oldFilterLengths[FiltPos] : 0; + } + StackFilter.NextWindow = (wrPtr != unpPtr) && ((wrPtr - unpPtr) & PackDef.MAXWINMASK) <= BlockStart; + + // DebugLog("\nNextWindow: UnpPtr=%08x WrPtr=%08x + // BlockStart=%08x",UnpPtr,WrPtr,BlockStart); + + oldFilterLengths[FiltPos] = StackFilter.BlockLength; + + // memset(StackFilter->Prg.InitR,0,sizeof(StackFilter->Prg.InitR)); + Utility.Fill(StackFilter.Program.InitR, 0); + StackFilter.Program.InitR[3] = RarVM.VM_GLOBALMEMADDR; // StackFilter->Prg.InitR[3]=VM_GLOBALMEMADDR; + StackFilter.Program.InitR[4] = StackFilter.BlockLength; + + // StackFilter->Prg.InitR[4]=StackFilter->BlockLength; + StackFilter.Program.InitR[5] = StackFilter.ExecCount; // StackFilter->Prg.InitR[5]=StackFilter->ExecCount; + + if ((firstByte & 0x10) != 0) + + // set registers to optional parameters + // if any + { + int InitMask = Utility.URShift(Inp.GetBits(), 9); + Inp.AddBits(7); + for (int I = 0; I < 7; I++) + { + if ((InitMask & (1 << I)) != 0) + { + // StackFilter->Prg.InitR[I]=RarVM::ReadData(Inp); + StackFilter.Program.InitR[I] = RarVM.ReadData(Inp); + } + } + } + + if (NewFilter) + { + int VMCodeSize = RarVM.ReadData(Inp); + if (VMCodeSize >= 0x10000 || VMCodeSize == 0) + { + return (false); + } + byte[] VMCode = new byte[VMCodeSize]; + for (int I = 0; I < VMCodeSize; I++) + { + if (Inp.Overflow(3)) + { + return (false); + } + VMCode[I] = (byte)(Inp.GetBits() >> 8); + Inp.AddBits(8); + } + + // VM.Prepare(&VMCode[0],VMCodeSize,&Filter->Prg); + rarVM.prepare(VMCode, VMCodeSize, Filter.Program); + } + StackFilter.Program.AltCommands = Filter.Program.Commands; // StackFilter->Prg.AltCmd=&Filter->Prg.Cmd[0]; + StackFilter.Program.CommandCount = Filter.Program.CommandCount; + + // StackFilter->Prg.CmdCount=Filter->Prg.CmdCount; + + int StaticDataSize = Filter.Program.StaticData.Count; + if (StaticDataSize > 0 
&& StaticDataSize < RarVM.VM_GLOBALMEMSIZE) + { + // read statically defined data contained in DB commands + // StackFilter->Prg.StaticData.Add(StaticDataSize); + StackFilter.Program.StaticData = Filter.Program.StaticData; + + // memcpy(&StackFilter->Prg.StaticData[0],&Filter->Prg.StaticData[0],StaticDataSize); + } + + if (StackFilter.Program.GlobalData.Count < RarVM.VM_FIXEDGLOBALSIZE) + { + // StackFilter->Prg.GlobalData.Reset(); + // StackFilter->Prg.GlobalData.Add(VM_FIXEDGLOBALSIZE); + StackFilter.Program.GlobalData.Clear(); + StackFilter.Program.GlobalData.SetSize(RarVM.VM_FIXEDGLOBALSIZE); + } + + // byte *GlobalData=&StackFilter->Prg.GlobalData[0]; + + List globalData = StackFilter.Program.GlobalData; + for (int I = 0; I < 7; I++) + { + rarVM.SetLowEndianValue(globalData, I * 4, StackFilter.Program.InitR[I]); + } + + // VM.SetLowEndianValue((uint + // *)&GlobalData[0x1c],StackFilter->BlockLength); + rarVM.SetLowEndianValue(globalData, 0x1c, StackFilter.BlockLength); + + // VM.SetLowEndianValue((uint *)&GlobalData[0x20],0); + rarVM.SetLowEndianValue(globalData, 0x20, 0); + rarVM.SetLowEndianValue(globalData, 0x24, 0); + rarVM.SetLowEndianValue(globalData, 0x28, 0); + + // VM.SetLowEndianValue((uint + // *)&GlobalData[0x2c],StackFilter->ExecCount); + rarVM.SetLowEndianValue(globalData, 0x2c, StackFilter.ExecCount); + + // memset(&GlobalData[0x30],0,16); + for (int i = 0; i < 16; i++) + { + globalData[0x30 + i] = 0x0; + } + if ((firstByte & 8) != 0) + + // put data block passed as parameter if any + { + if (Inp.Overflow(3)) + { + return (false); + } + int DataSize = RarVM.ReadData(Inp); + if (DataSize > RarVM.VM_GLOBALMEMSIZE - RarVM.VM_FIXEDGLOBALSIZE) + { + return (false); + } + int CurSize = StackFilter.Program.GlobalData.Count; + if (CurSize < DataSize + RarVM.VM_FIXEDGLOBALSIZE) + { + // StackFilter->Prg.GlobalData.Add(DataSize+VM_FIXEDGLOBALSIZE-CurSize); + StackFilter.Program.GlobalData.SetSize(DataSize + RarVM.VM_FIXEDGLOBALSIZE - CurSize); + } + int 
offset = RarVM.VM_FIXEDGLOBALSIZE; + globalData = StackFilter.Program.GlobalData; + for (int I = 0; I < DataSize; I++) + { + if (Inp.Overflow(3)) + { + return (false); + } + globalData[offset + I] = (byte)(Utility.URShift(Inp.GetBits(), 8)); + Inp.AddBits(8); + } + } + return (true); + } + + private void ExecuteCode(VMPreparedProgram Prg) + { + if (Prg.GlobalData.Count > 0) + { + // Prg->InitR[6]=int64to32(WrittenFileSize); + Prg.InitR[6] = (int)(writtenFileSize); + + // rarVM.SetLowEndianValue((uint + // *)&Prg->GlobalData[0x24],int64to32(WrittenFileSize)); + rarVM.SetLowEndianValue(Prg.GlobalData, 0x24, (int)writtenFileSize); + + // rarVM.SetLowEndianValue((uint + // *)&Prg->GlobalData[0x28],int64to32(WrittenFileSize>>32)); + rarVM.SetLowEndianValue(Prg.GlobalData, 0x28, (int)(Utility.URShift(writtenFileSize, 32))); + rarVM.execute(Prg); + } + } + + private void CleanUp() + { + if (ppm != null) + { + SubAllocator allocator = ppm.SubAlloc; + if (allocator != null) + { + allocator.StopSubAllocator(); + } + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack15.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack15.cs new file mode 100644 index 0000000000..6c0e09f398 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack15.cs @@ -0,0 +1,720 @@ +/* +* Copyright (c) 2007 innoSysTec (R) GmbH, Germany. All rights reserved. 
+* Original author: Edmund Wagner +* Creation date: 21.06.2007 +* +* the unrar licence applies to all junrar source and binary distributions +* you are not allowed to use this source to re-create the RAR compression algorithm +*/ + +using System; +using System.IO; +using SharpCompress.Compressors.Rar.UnpackV1.Decode; + +namespace SharpCompress.Compressors.Rar.UnpackV1 +{ + internal partial class Unpack + { + private int readBorder; + + private bool suspended; + + internal bool unpAllBuf; + + //private ComprDataIO unpIO; + private Stream readStream; + private Stream writeStream; + + internal bool unpSomeRead; + + private int readTop; + + private long destUnpSize; + + private byte[] window; + + private readonly int[] oldDist = new int[4]; + + private int unpPtr, wrPtr; + + private int oldDistPtr; + + private readonly int[] ChSet = new int[256], + ChSetA = new int[256], + ChSetB = new int[256], + ChSetC = new int[256]; + + private readonly int[] Place = new int[256], + PlaceA = new int[256], + PlaceB = new int[256], + PlaceC = new int[256]; + + private readonly int[] NToPl = new int[256], NToPlB = new int[256], NToPlC = new int[256]; + + private int FlagBuf, AvrPlc, AvrPlcB, AvrLn1, AvrLn2, AvrLn3; + + private int Buf60, NumHuf, StMode, LCount, FlagsCnt; + + private int Nhfb, Nlzb, MaxDist3; + + private int lastDist, lastLength; + + private const int STARTL1 = 2; + + private static readonly int[] DecL1 = + { + 0x8000, 0xa000, 0xc000, 0xd000, 0xe000, 0xea00, 0xee00, 0xf000, 0xf200, 0xf200 + , 0xffff + }; + + private static readonly int[] PosL1 = {0, 0, 0, 2, 3, 5, 7, 11, 16, 20, 24, 32, 32}; + + private const int STARTL2 = 3; + + private static readonly int[] DecL2 = + { + 0xa000, 0xc000, 0xd000, 0xe000, 0xea00, 0xee00, 0xf000, 0xf200, 0xf240, 0xffff + }; + + private static readonly int[] PosL2 = {0, 0, 0, 0, 5, 7, 9, 13, 18, 22, 26, 34, 36}; + + private const int STARTHF0 = 4; + + private static readonly int[] DecHf0 = {0x8000, 0xc000, 0xe000, 0xf200, 0xf200, 0xf200, 
0xf200, 0xf200, 0xffff}; + + private static readonly int[] PosHf0 = {0, 0, 0, 0, 0, 8, 16, 24, 33, 33, 33, 33, 33}; + + private const int STARTHF1 = 5; + + private static readonly int[] DecHf1 = {0x2000, 0xc000, 0xe000, 0xf000, 0xf200, 0xf200, 0xf7e0, 0xffff}; + + private static readonly int[] PosHf1 = {0, 0, 0, 0, 0, 0, 4, 44, 60, 76, 80, 80, 127}; + + private const int STARTHF2 = 5; + + private static readonly int[] DecHf2 = {0x1000, 0x2400, 0x8000, 0xc000, 0xfa00, 0xffff, 0xffff, 0xffff}; + + private static readonly int[] PosHf2 = {0, 0, 0, 0, 0, 0, 2, 7, 53, 117, 233, 0, 0}; + + private const int STARTHF3 = 6; + + private static readonly int[] DecHf3 = {0x800, 0x2400, 0xee00, 0xfe80, 0xffff, 0xffff, 0xffff}; + + private static readonly int[] PosHf3 = {0, 0, 0, 0, 0, 0, 0, 2, 16, 218, 251, 0, 0}; + + private const int STARTHF4 = 8; + + private static readonly int[] DecHf4 = {0xff00, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}; + + private static readonly int[] PosHf4 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0}; + + private static readonly int[] ShortLen1 = {1, 3, 4, 4, 5, 6, 7, 8, 8, 4, 4, 5, 6, 6, 4, 0}; + + private static readonly int[] ShortXor1 = + { + 0, 0xa0, 0xd0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff, 0xc0, 0x80, 0x90, 0x98 + , 0x9c, 0xb0 + }; + + private static readonly int[] ShortLen2 = {2, 3, 3, 3, 4, 4, 5, 6, 6, 4, 4, 5, 6, 6, 4, 0}; + + private static readonly int[] ShortXor2 = + { + 0, 0x40, 0x60, 0xa0, 0xd0, 0xe0, 0xf0, 0xf8, 0xfc, 0xc0, 0x80, 0x90, 0x98 + , 0x9c, 0xb0 + }; + + private void unpack15(bool solid) + { + if (suspended) + { + unpPtr = wrPtr; + } + else + { + UnpInitData(solid); + oldUnpInitData(solid); + unpReadBuf(); + if (!solid) + { + initHuff(); + unpPtr = 0; + } + else + { + unpPtr = wrPtr; + } + --destUnpSize; + } + if (destUnpSize >= 0) + { + getFlagsBuf(); + FlagsCnt = 8; + } + + while (destUnpSize >= 0) + { + unpPtr &= PackDef.MAXWINMASK; + + if (inAddr > readTop - 30 && !unpReadBuf()) + { + break; + } + if (((wrPtr - unpPtr) & 
PackDef.MAXWINMASK) < 270 && wrPtr != unpPtr) + { + oldUnpWriteBuf(); + if (suspended) + { + return; + } + } + if (StMode != 0) + { + huffDecode(); + continue; + } + + if (--FlagsCnt < 0) + { + getFlagsBuf(); + FlagsCnt = 7; + } + + if ((FlagBuf & 0x80) != 0) + { + FlagBuf <<= 1; + if (Nlzb > Nhfb) + { + longLZ(); + } + else + { + huffDecode(); + } + } + else + { + FlagBuf <<= 1; + if (--FlagsCnt < 0) + { + getFlagsBuf(); + FlagsCnt = 7; + } + if ((FlagBuf & 0x80) != 0) + { + FlagBuf <<= 1; + if (Nlzb > Nhfb) + { + huffDecode(); + } + else + { + longLZ(); + } + } + else + { + FlagBuf <<= 1; + shortLZ(); + } + } + } + oldUnpWriteBuf(); + } + + private bool unpReadBuf() + { + int dataSize = readTop - inAddr; + if (dataSize < 0) + { + return (false); + } + if (inAddr > MAX_SIZE / 2) + { + if (dataSize > 0) + { + //memmove(InBuf,InBuf+InAddr,DataSize); + // for (int i = 0; i < dataSize; i++) { + // inBuf[i] = inBuf[inAddr + i]; + // } + Array.Copy(InBuf, inAddr, InBuf, 0, dataSize); + } + inAddr = 0; + readTop = dataSize; + } + else + { + dataSize = readTop; + } + + //int readCode=UnpIO->UnpRead(InBuf+DataSize,(BitInput::MAX_SIZE-DataSize)&~0xf); + int readCode = readStream.Read(InBuf, dataSize, (MAX_SIZE - dataSize) & ~0xf); + if (readCode > 0) + { + readTop += readCode; + } + readBorder = readTop - 30; + return (readCode != -1); + } + + private int getShortLen1(int pos) + { + return pos == 1 ? Buf60 + 3 : ShortLen1[pos]; + } + + private int getShortLen2(int pos) + { + return pos == 3 ? 
Buf60 + 3 : ShortLen2[pos]; + } + + private void shortLZ() + { + int Length, SaveLength; + int LastDistance; + int Distance; + int DistancePlace; + NumHuf = 0; + + int BitField = GetBits(); + if (LCount == 2) + { + AddBits(1); + if (BitField >= 0x8000) + { + oldCopyString(lastDist, lastLength); + return; + } + BitField <<= 1; + LCount = 0; + } + BitField = Utility.URShift(BitField, 8); + if (AvrLn1 < 37) + { + for (Length = 0;; Length++) + { + if (((BitField ^ ShortXor1[Length]) & (~(Utility.URShift(0xff, getShortLen1(Length))))) == 0) + { + break; + } + } + AddBits(getShortLen1(Length)); + } + else + { + for (Length = 0;; Length++) + { + if (((BitField ^ ShortXor2[Length]) & (~(0xff >> getShortLen2(Length)))) == 0) + { + break; + } + } + AddBits(getShortLen2(Length)); + } + + if (Length >= 9) + { + if (Length == 9) + { + LCount++; + oldCopyString(lastDist, lastLength); + return; + } + if (Length == 14) + { + LCount = 0; + Length = decodeNum(GetBits(), STARTL2, DecL2, PosL2) + 5; + Distance = (GetBits() >> 1) | 0x8000; + AddBits(15); + lastLength = Length; + lastDist = Distance; + oldCopyString(Distance, Length); + return; + } + + LCount = 0; + SaveLength = Length; + Distance = oldDist[(oldDistPtr - (Length - 9)) & 3]; + Length = decodeNum(GetBits(), STARTL1, DecL1, PosL1) + 2; + if (Length == 0x101 && SaveLength == 10) + { + Buf60 ^= 1; + return; + } + if (Distance > 256) + { + Length++; + } + if (Distance >= MaxDist3) + { + Length++; + } + + oldDist[oldDistPtr++] = Distance; + oldDistPtr = oldDistPtr & 3; + lastLength = Length; + lastDist = Distance; + oldCopyString(Distance, Length); + return; + } + + LCount = 0; + AvrLn1 += Length; + AvrLn1 -= (AvrLn1 >> 4); + + DistancePlace = decodeNum(GetBits(), STARTHF2, DecHf2, PosHf2) & 0xff; + Distance = ChSetA[DistancePlace]; + if (--DistancePlace != -1) + { + PlaceA[Distance]--; + LastDistance = ChSetA[DistancePlace]; + PlaceA[LastDistance]++; + ChSetA[DistancePlace + 1] = LastDistance; + ChSetA[DistancePlace] = 
Distance; + } + Length += 2; + oldDist[oldDistPtr++] = ++Distance; + oldDistPtr = oldDistPtr & 3; + lastLength = Length; + lastDist = Distance; + oldCopyString(Distance, Length); + } + + private void longLZ() + { + int Length; + int Distance; + int DistancePlace, NewDistancePlace; + int OldAvr2, OldAvr3; + + NumHuf = 0; + Nlzb += 16; + if (Nlzb > 0xff) + { + Nlzb = 0x90; + Nhfb = Utility.URShift(Nhfb, 1); + } + OldAvr2 = AvrLn2; + + int BitField = GetBits(); + if (AvrLn2 >= 122) + { + Length = decodeNum(BitField, STARTL2, DecL2, PosL2); + } + else + { + if (AvrLn2 >= 64) + { + Length = decodeNum(BitField, STARTL1, DecL1, PosL1); + } + else + { + if (BitField < 0x100) + { + Length = BitField; + AddBits(16); + } + else + { + for (Length = 0; ((BitField << Length) & 0x8000) == 0; Length++) + { + ; + } + AddBits(Length + 1); + } + } + } + AvrLn2 += Length; + AvrLn2 -= Utility.URShift(AvrLn2, 5); + + BitField = GetBits(); + if (AvrPlcB > 0x28ff) + { + DistancePlace = decodeNum(BitField, STARTHF2, DecHf2, PosHf2); + } + else + { + if (AvrPlcB > 0x6ff) + { + DistancePlace = decodeNum(BitField, STARTHF1, DecHf1, PosHf1); + } + else + { + DistancePlace = decodeNum(BitField, STARTHF0, DecHf0, PosHf0); + } + } + AvrPlcB += DistancePlace; + AvrPlcB -= (AvrPlcB >> 8); + while (true) + { + Distance = ChSetB[DistancePlace & 0xff]; + NewDistancePlace = NToPlB[Distance++ & 0xff]++; + if ((Distance & 0xff) == 0) + { + corrHuff(ChSetB, NToPlB); + } + else + { + break; + } + } + + ChSetB[DistancePlace] = ChSetB[NewDistancePlace]; + ChSetB[NewDistancePlace] = Distance; + + Distance = Utility.URShift(((Distance & 0xff00) | (Utility.URShift(GetBits(), 8))), 1); + AddBits(7); + + OldAvr3 = AvrLn3; + if (Length != 1 && Length != 4) + { + if (Length == 0 && Distance <= MaxDist3) + { + AvrLn3++; + AvrLn3 -= (AvrLn3 >> 8); + } + else + { + if (AvrLn3 > 0) + { + AvrLn3--; + } + } + } + Length += 3; + if (Distance >= MaxDist3) + { + Length++; + } + if (Distance <= 256) + { + Length += 8; + } + 
if (OldAvr3 > 0xb0 || AvrPlc >= 0x2a00 && OldAvr2 < 0x40) + { + MaxDist3 = 0x7f00; + } + else + { + MaxDist3 = 0x2001; + } + oldDist[oldDistPtr++] = Distance; + oldDistPtr = oldDistPtr & 3; + lastLength = Length; + lastDist = Distance; + oldCopyString(Distance, Length); + } + + private void huffDecode() + { + int CurByte, NewBytePlace; + int Length; + int Distance; + int BytePlace; + + int BitField = GetBits(); + + if (AvrPlc > 0x75ff) + { + BytePlace = decodeNum(BitField, STARTHF4, DecHf4, PosHf4); + } + else + { + if (AvrPlc > 0x5dff) + { + BytePlace = decodeNum(BitField, STARTHF3, DecHf3, PosHf3); + } + else + { + if (AvrPlc > 0x35ff) + { + BytePlace = decodeNum(BitField, STARTHF2, DecHf2, PosHf2); + } + else + { + if (AvrPlc > 0x0dff) + { + BytePlace = decodeNum(BitField, STARTHF1, DecHf1, PosHf1); + } + else + { + BytePlace = decodeNum(BitField, STARTHF0, DecHf0, PosHf0); + } + } + } + } + BytePlace &= 0xff; + if (StMode != 0) + { + if (BytePlace == 0 && BitField > 0xfff) + { + BytePlace = 0x100; + } + if (--BytePlace == -1) + { + BitField = GetBits(); + AddBits(1); + if ((BitField & 0x8000) != 0) + { + NumHuf = StMode = 0; + return; + } + Length = (BitField & 0x4000) != 0 ? 
4 : 3; + AddBits(1); + Distance = decodeNum(GetBits(), STARTHF2, DecHf2, PosHf2); + Distance = (Distance << 5) | (Utility.URShift(GetBits(), 11)); + AddBits(5); + oldCopyString(Distance, Length); + return; + } + } + else + { + if (NumHuf++ >= 16 && FlagsCnt == 0) + { + StMode = 1; + } + } + AvrPlc += BytePlace; + AvrPlc -= Utility.URShift(AvrPlc, 8); + Nhfb += 16; + if (Nhfb > 0xff) + { + Nhfb = 0x90; + Nlzb = Utility.URShift(Nlzb, 1); + } + + window[unpPtr++] = (byte)(Utility.URShift(ChSet[BytePlace], 8)); + --destUnpSize; + + while (true) + { + CurByte = ChSet[BytePlace]; + NewBytePlace = NToPl[CurByte++ & 0xff]++; + if ((CurByte & 0xff) > 0xa1) + { + corrHuff(ChSet, NToPl); + } + else + { + break; + } + } + + ChSet[BytePlace] = ChSet[NewBytePlace]; + ChSet[NewBytePlace] = CurByte; + } + + private void getFlagsBuf() + { + int Flags, NewFlagsPlace; + int FlagsPlace = decodeNum(GetBits(), STARTHF2, DecHf2, PosHf2); + + while (true) + { + Flags = ChSetC[FlagsPlace]; + FlagBuf = Utility.URShift(Flags, 8); + NewFlagsPlace = NToPlC[Flags++ & 0xff]++; + if ((Flags & 0xff) != 0) + { + break; + } + corrHuff(ChSetC, NToPlC); + } + + ChSetC[FlagsPlace] = ChSetC[NewFlagsPlace]; + ChSetC[NewFlagsPlace] = Flags; + } + + private void oldUnpInitData(bool Solid) + { + if (!Solid) + { + AvrPlcB = AvrLn1 = AvrLn2 = AvrLn3 = NumHuf = Buf60 = 0; + AvrPlc = 0x3500; + MaxDist3 = 0x2001; + Nhfb = Nlzb = 0x80; + } + FlagsCnt = 0; + FlagBuf = 0; + StMode = 0; + LCount = 0; + readTop = 0; + } + + private void initHuff() + { + for (int I = 0; I < 256; I++) + { + Place[I] = PlaceA[I] = PlaceB[I] = I; + PlaceC[I] = (~I + 1) & 0xff; + ChSet[I] = ChSetB[I] = I << 8; + ChSetA[I] = I; + ChSetC[I] = ((~I + 1) & 0xff) << 8; + } + + Utility.Fill(NToPl, 0); // memset(NToPl,0,sizeof(NToPl)); + Utility.Fill(NToPlB, 0); // memset(NToPlB,0,sizeof(NToPlB)); + Utility.Fill(NToPlC, 0); // memset(NToPlC,0,sizeof(NToPlC)); + corrHuff(ChSetB, NToPlB); + } + + private void corrHuff(int[] CharSet, int[] 
NumToPlace) + { + int I, J, pos = 0; + for (I = 7; I >= 0; I--) + { + for (J = 0; J < 32; J++, pos++) + { + CharSet[pos] = ((CharSet[pos] & ~0xff) | I); // *CharSet=(*CharSet + + // & ~0xff) | I; + } + } + Utility.Fill(NumToPlace, 0); // memset(NumToPlace,0,sizeof(NToPl)); + for (I = 6; I >= 0; I--) + { + NumToPlace[I] = (7 - I) * 32; + } + } + + private void oldCopyString(int Distance, int Length) + { + destUnpSize -= Length; + while ((Length--) != 0) + { + window[unpPtr] = window[(unpPtr - Distance) & PackDef.MAXWINMASK]; + unpPtr = (unpPtr + 1) & PackDef.MAXWINMASK; + } + } + + private int decodeNum(int Num, int StartPos, int[] DecTab, int[] PosTab) + { + int I; + for (Num &= 0xfff0, I = 0; DecTab[I] <= Num; I++) + { + StartPos++; + } + AddBits(StartPos); + return ((Utility.URShift((Num - (I != 0 ? DecTab[I - 1] : 0)), (16 - StartPos))) + PosTab[StartPos]); + } + + private void oldUnpWriteBuf() + { + if (unpPtr != wrPtr) + { + unpSomeRead = true; + } + if (unpPtr < wrPtr) + { + writeStream.Write(window, wrPtr, -wrPtr & PackDef.MAXWINMASK); + writeStream.Write(window, 0, unpPtr); + unpAllBuf = true; + } + else + { + writeStream.Write(window, wrPtr, unpPtr - wrPtr); + } + wrPtr = unpPtr; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack20.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack20.cs new file mode 100644 index 0000000000..db8e8cd7b0 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack20.cs @@ -0,0 +1,524 @@ +/* +* Copyright (c) 2007 innoSysTec (R) GmbH, Germany. All rights reserved. 
+* Original author: Edmund Wagner +* Creation date: 21.06.2007 +* +* the unrar licence applies to all junrar source and binary distributions +* you are not allowed to use this source to re-create the RAR compression algorithm +*/ + +using System; +using SharpCompress.Compressors.Rar.UnpackV1.Decode; + +namespace SharpCompress.Compressors.Rar.UnpackV1 +{ + internal partial class Unpack + { + private readonly MultDecode[] MD = new MultDecode[4]; + + private readonly byte[] UnpOldTable20 = new byte[PackDef.MC20 * 4]; + + private int UnpAudioBlock, UnpChannels, UnpCurChannel, UnpChannelDelta; + + private readonly AudioVariables[] AudV = new AudioVariables[4]; + + private readonly LitDecode LD = new LitDecode(); + + private readonly DistDecode DD = new DistDecode(); + + private readonly LowDistDecode LDD = new LowDistDecode(); + + private readonly RepDecode RD = new RepDecode(); + + private readonly BitDecode BD = new BitDecode(); + + private static readonly int[] LDecode = + { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, + 56, 64, 80, 96, 112, 128, 160, 192, 224 + }; + + private static readonly byte[] LBits = + { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + 4, 5, 5, 5, 5 + }; + + private static readonly int[] DDecode = + { + 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, + 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, + 24576, 32768, 49152, 65536, 98304, 131072, 196608, 262144, 327680, + 393216, 458752, 524288, 589824, 655360, 720896, 786432, 851968, + 917504, 983040 + }; + + private static readonly int[] DBits = + { + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16 + , 16, 16, 16, 16, 16, 16, 16 + }; + + private static readonly int[] SDDecode = {0, 4, 8, 16, 32, 64, 128, 192}; + + private static readonly int[] SDBits = {2, 2, 3, 4, 5, 6, 6, 6}; + + private void unpack20(bool solid) 
+ { + int Bits; + + if (suspended) + { + unpPtr = wrPtr; + } + else + { + UnpInitData(solid); + if (!unpReadBuf()) + { + return; + } + if (!solid) + { + if (!ReadTables20()) + { + return; + } + } + --destUnpSize; + } + + while (destUnpSize >= 0) + { + unpPtr &= PackDef.MAXWINMASK; + + if (inAddr > readTop - 30) + { + if (!unpReadBuf()) + { + break; + } + } + if (((wrPtr - unpPtr) & PackDef.MAXWINMASK) < 270 && wrPtr != unpPtr) + { + oldUnpWriteBuf(); + if (suspended) + { + return; + } + } + if (UnpAudioBlock != 0) + { + int AudioNumber = this.decodeNumber(MD[UnpCurChannel]); + + if (AudioNumber == 256) + { + if (!ReadTables20()) + { + break; + } + continue; + } + window[unpPtr++] = DecodeAudio(AudioNumber); + if (++UnpCurChannel == UnpChannels) + { + UnpCurChannel = 0; + } + --destUnpSize; + continue; + } + + int Number = this.decodeNumber(LD); + if (Number < 256) + { + window[unpPtr++] = (byte)Number; + --destUnpSize; + continue; + } + if (Number > 269) + { + int Length = LDecode[Number -= 270] + 3; + if ((Bits = LBits[Number]) > 0) + { + Length += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + + int DistNumber = this.decodeNumber(DD); + int Distance = DDecode[DistNumber] + 1; + if ((Bits = DBits[DistNumber]) > 0) + { + Distance += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + + if (Distance >= 0x2000) + { + Length++; + if (Distance >= 0x40000L) + { + Length++; + } + } + + CopyString20(Length, Distance); + continue; + } + if (Number == 269) + { + if (!ReadTables20()) + { + break; + } + continue; + } + if (Number == 256) + { + CopyString20(lastLength, lastDist); + continue; + } + if (Number < 261) + { + int Distance = oldDist[(oldDistPtr - (Number - 256)) & 3]; + int LengthNumber = this.decodeNumber(RD); + int Length = LDecode[LengthNumber] + 2; + if ((Bits = LBits[LengthNumber]) > 0) + { + Length += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + if (Distance >= 0x101) + { + Length++; + if (Distance >= 0x2000) + { 
+ Length++; + if (Distance >= 0x40000) + { + Length++; + } + } + } + CopyString20(Length, Distance); + continue; + } + if (Number < 270) + { + int Distance = SDDecode[Number -= 261] + 1; + if ((Bits = SDBits[Number]) > 0) + { + Distance += Utility.URShift(GetBits(), (16 - Bits)); + AddBits(Bits); + } + CopyString20(2, Distance); + } + } + ReadLastTables(); + oldUnpWriteBuf(); + } + + private void CopyString20(int Length, int Distance) + { + lastDist = oldDist[oldDistPtr++ & 3] = Distance; + lastLength = Length; + destUnpSize -= Length; + + int DestPtr = unpPtr - Distance; + if (DestPtr < PackDef.MAXWINSIZE - 300 && unpPtr < PackDef.MAXWINSIZE - 300) + { + window[unpPtr++] = window[DestPtr++]; + window[unpPtr++] = window[DestPtr++]; + while (Length > 2) + { + Length--; + window[unpPtr++] = window[DestPtr++]; + } + } + else + { + while ((Length--) != 0) + { + window[unpPtr] = window[DestPtr++ & PackDef.MAXWINMASK]; + unpPtr = (unpPtr + 1) & PackDef.MAXWINMASK; + } + } + } + + private bool ReadTables20() + { + byte[] BitLength = new byte[PackDef.BC20]; + byte[] Table = new byte[PackDef.MC20 * 4]; + int TableSize, N, I; + if (inAddr > readTop - 25) + { + if (!unpReadBuf()) + { + return (false); + } + } + int BitField = GetBits(); + UnpAudioBlock = (BitField & 0x8000); + + if (0 == (BitField & 0x4000)) + { + // memset(UnpOldTable20,0,sizeof(UnpOldTable20)); + Utility.Fill(UnpOldTable20, (byte)0); + } + AddBits(2); + + if (UnpAudioBlock != 0) + { + UnpChannels = ((Utility.URShift(BitField, 12)) & 3) + 1; + if (UnpCurChannel >= UnpChannels) + { + UnpCurChannel = 0; + } + AddBits(2); + TableSize = PackDef.MC20 * UnpChannels; + } + else + { + TableSize = PackDef.NC20 + PackDef.DC20 + PackDef.RC20; + } + for (I = 0; I < PackDef.BC20; I++) + { + BitLength[I] = (byte)(Utility.URShift(GetBits(), 12)); + AddBits(4); + } + UnpackUtility.makeDecodeTables(BitLength, 0, BD, PackDef.BC20); + I = 0; + while (I < TableSize) + { + if (inAddr > readTop - 5) + { + if (!unpReadBuf()) + { + 
return (false); + } + } + int Number = this.decodeNumber(BD); + if (Number < 16) + { + Table[I] = (byte)((Number + UnpOldTable20[I]) & 0xf); + I++; + } + else if (Number == 16) + { + N = (Utility.URShift(GetBits(), 14)) + 3; + AddBits(2); + while (N-- > 0 && I < TableSize) + { + Table[I] = Table[I - 1]; + I++; + } + } + else + { + if (Number == 17) + { + N = (Utility.URShift(GetBits(), 13)) + 3; + AddBits(3); + } + else + { + N = (Utility.URShift(GetBits(), 9)) + 11; + AddBits(7); + } + while (N-- > 0 && I < TableSize) + { + Table[I++] = 0; + } + } + } + if (inAddr > readTop) + { + return (true); + } + if (UnpAudioBlock != 0) + { + for (I = 0; I < UnpChannels; I++) + { + UnpackUtility.makeDecodeTables(Table, I * PackDef.MC20, MD[I], PackDef.MC20); + } + } + else + { + UnpackUtility.makeDecodeTables(Table, 0, LD, PackDef.NC20); + UnpackUtility.makeDecodeTables(Table, PackDef.NC20, DD, PackDef.DC20); + UnpackUtility.makeDecodeTables(Table, PackDef.NC20 + PackDef.DC20, RD, PackDef.RC20); + } + + // memcpy(UnpOldTable20,Table,sizeof(UnpOldTable20)); + for (int i = 0; i < UnpOldTable20.Length; i++) + { + UnpOldTable20[i] = Table[i]; + } + return (true); + } + + private void unpInitData20(bool Solid) + { + if (!Solid) + { + UnpChannelDelta = UnpCurChannel = 0; + UnpChannels = 1; + + // memset(AudV,0,sizeof(AudV)); + AudV[0] = new AudioVariables(); + AudV[1] = new AudioVariables(); + AudV[2] = new AudioVariables(); + AudV[3] = new AudioVariables(); + + // memset(UnpOldTable20,0,sizeof(UnpOldTable20)); + Utility.Fill(UnpOldTable20, (byte)0); + } + } + + private void ReadLastTables() + { + if (readTop >= inAddr + 5) + { + if (UnpAudioBlock != 0) + { + if (this.decodeNumber(MD[UnpCurChannel]) == 256) + { + ReadTables20(); + } + } + else + { + if (this.decodeNumber(LD) == 269) + { + ReadTables20(); + } + } + } + } + + private byte DecodeAudio(int Delta) + { + AudioVariables v = AudV[UnpCurChannel]; + v.ByteCount = v.ByteCount + 1; + v.D4 = v.D3; + v.D3 = v.D2; // ->D3=V->D2; 
+ v.D2 = v.LastDelta - v.D1; // ->D2=V->LastDelta-V->D1; + v.D1 = v.LastDelta; // V->D1=V->LastDelta; + + // int PCh=8*V->LastChar+V->K1*V->D1 +V->K2*V->D2 +V->K3*V->D3 + // +V->K4*V->D4+ V->K5*UnpChannelDelta; + int PCh = 8 * v.LastChar + v.K1 * v.D1; + PCh += v.K2 * v.D2 + v.K3 * v.D3; + PCh += v.K4 * v.D4 + v.K5 * UnpChannelDelta; + PCh = (Utility.URShift(PCh, 3)) & 0xFF; + + int Ch = PCh - Delta; + + int D = ((byte)Delta) << 3; + + v.Dif[0] += Math.Abs(D); // V->Dif[0]+=abs(D); + v.Dif[1] += Math.Abs(D - v.D1); // V->Dif[1]+=abs(D-V->D1); + v.Dif[2] += Math.Abs(D + v.D1); // V->Dif[2]+=abs(D+V->D1); + v.Dif[3] += Math.Abs(D - v.D2); // V->Dif[3]+=abs(D-V->D2); + v.Dif[4] += Math.Abs(D + v.D2); // V->Dif[4]+=abs(D+V->D2); + v.Dif[5] += Math.Abs(D - v.D3); // V->Dif[5]+=abs(D-V->D3); + v.Dif[6] += Math.Abs(D + v.D3); // V->Dif[6]+=abs(D+V->D3); + v.Dif[7] += Math.Abs(D - v.D4); // V->Dif[7]+=abs(D-V->D4); + v.Dif[8] += Math.Abs(D + v.D4); // V->Dif[8]+=abs(D+V->D4); + v.Dif[9] += Math.Abs(D - UnpChannelDelta); // V->Dif[9]+=abs(D-UnpChannelDelta); + v.Dif[10] += Math.Abs(D + UnpChannelDelta); // V->Dif[10]+=abs(D+UnpChannelDelta); + + v.LastDelta = (byte)(Ch - v.LastChar); + UnpChannelDelta = v.LastDelta; + v.LastChar = Ch; // V->LastChar=Ch; + + if ((v.ByteCount & 0x1F) == 0) + { + int MinDif = v.Dif[0], NumMinDif = 0; + v.Dif[0] = 0; // ->Dif[0]=0; + for (int I = 1; I < v.Dif.Length; I++) + { + if (v.Dif[I] < MinDif) + { + MinDif = v.Dif[I]; + NumMinDif = I; + } + v.Dif[I] = 0; + } + switch (NumMinDif) + { + case 1: + if (v.K1 >= -16) + { + v.K1 = v.K1 - 1; // V->K1--; + } + break; + + case 2: + if (v.K1 < 16) + { + v.K1 = v.K1 + 1; // V->K1++; + } + break; + + case 3: + if (v.K2 >= -16) + { + v.K2 = v.K2 - 1; // V->K2--; + } + break; + + case 4: + if (v.K2 < 16) + { + v.K2 = v.K2 + 1; // V->K2++; + } + break; + + case 5: + if (v.K3 >= -16) + { + v.K3 = v.K3 - 1; + } + break; + + case 6: + if (v.K3 < 16) + { + v.K3 = v.K3 + 1; + } + break; + + case 7: + if 
(v.K4 >= -16) + { + v.K4 = v.K4 - 1; + } + break; + + case 8: + if (v.K4 < 16) + { + v.K4 = v.K4 + 1; + } + break; + + case 9: + if (v.K5 >= -16) + { + v.K5 = v.K5 - 1; + } + break; + + case 10: + if (v.K5 < 16) + { + v.K5 = v.K5 + 1; + } + break; + } + } + return ((byte)Ch); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs new file mode 100644 index 0000000000..274692cfb8 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs @@ -0,0 +1,841 @@ +#if true +using System; +using System.Collections.Generic; +using SharpCompress.Compressors.Rar.UnpackV1.Decode; +using SharpCompress.Compressors.Rar.VM; + +using size_t=System.UInt32; +using UnpackBlockHeader = SharpCompress.Compressors.Rar.UnpackV1; + +namespace SharpCompress.Compressors.Rar.UnpackV1 +{ + internal partial class Unpack + { + + // Maximum allowed number of compressed bits processed in quick mode. + private const int MAX_QUICK_DECODE_BITS = 10; + + // Maximum number of filters per entire data block. Must be at least + // twice more than MAX_PACK_FILTERS to store filters from two data blocks. + private const int MAX_UNPACK_FILTERS = 8192; + + // Maximum number of filters per entire data block for RAR3 unpack. + // Must be at least twice more than v3_MAX_PACK_FILTERS to store filters + // from two data blocks. + private const int MAX3_UNPACK_FILTERS = 8192; + + // Limit maximum number of channels in RAR3 delta filter to some reasonable + // value to prevent too slow processing of corrupt archives with invalid + // channels number. Must be equal or larger than v3_MAX_FILTER_CHANNELS. + // No need to provide it for RAR5, which uses only 5 bits to store channels. + private const int MAX3_UNPACK_CHANNELS = 1024; + + // Maximum size of single filter block. We restrict it to limit memory + // allocation. 
Must be equal or larger than MAX_ANALYZE_SIZE. + private const int MAX_FILTER_BLOCK_SIZE = 0x400000; + + // Write data in 4 MB or smaller blocks. Must not exceed PACK_MAX_WRITE, + // so we keep number of buffered filter in unpacker reasonable. + private const int UNPACK_MAX_WRITE = 0x400000; + + // Decode compressed bit fields to alphabet numbers. +// struct DecodeTable +// { +// // Real size of DecodeNum table. +// public uint MaxNum; +// +// // Left aligned start and upper limit codes defining code space +// // ranges for bit lengths. DecodeLen[BitLength-1] defines the start of +// // range for bit length and DecodeLen[BitLength] defines next code +// // after the end of range or in other words the upper limit code +// // for specified bit length. +// //uint DecodeLen[16]; +// public uint [] DecodeLen = new uint[16]; +// +// // Every item of this array contains the sum of all preceding items. +// // So it contains the start position in code list for every bit length. +// public uint DecodePos[16]; +// +// // Number of compressed bits processed in quick mode. +// // Must not exceed MAX_QUICK_DECODE_BITS. +// public uint QuickBits; +// +// // Translates compressed bits (up to QuickBits length) +// // to bit length in quick mode. +// public byte QuickLen[1< Filters { get { return filters; } } + +// TODO: make sure these aren't already somewhere else +public int BlockSize; +public int BlockBitSize; +public int BlockStart; +public int HeaderSize; +public bool LastBlockInFile; +public bool TablePresent; + + public void Unpack5(bool Solid) { + FileExtracted=true; + + if (!Suspended) + { + UnpInitData(Solid); + if (!UnpReadBuf()) + return; + + // Check TablesRead5 to be sure that we read tables at least once + // regardless of current block header TablePresent flag. + // So we can safefly use these tables below. 
+ if (!ReadBlockHeader() || + !ReadTables() || !TablesRead5) + return; + } + + while (true) + { + UnpPtr &= MaxWinMask; + + if (Inp.InAddr>=ReadBorder) + { + bool FileDone=false; + + // We use 'while', because for empty block containing only Huffman table, + // we'll be on the block border once again just after reading the table. + while (Inp.InAddr>BlockHeader.BlockStart+BlockHeader.BlockSize-1 || + Inp.InAddr==BlockHeader.BlockStart+BlockHeader.BlockSize-1 && + Inp.InBit>=BlockHeader.BlockBitSize) + { + if (BlockHeader.LastBlockInFile) + { + FileDone=true; + break; + } + if (!ReadBlockHeader() || !ReadTables()) + return; + } + if (FileDone || !UnpReadBuf()) + break; + } + + if (((WriteBorder-UnpPtr) & MaxWinMask)DestUnpSize) + return; + if (Suspended) + { + FileExtracted=false; + return; + } + } + + //uint MainSlot=DecodeNumber(Inp,LD); + uint MainSlot= this.DecodeNumber(LD); + if (MainSlot<256) + { +// if (Fragmented) +// FragWindow[UnpPtr++]=(byte)MainSlot; +// else + Window[UnpPtr++]=(byte)MainSlot; + continue; + } + if (MainSlot>=262) + { + uint Length=SlotToLength(MainSlot-262); + + //uint DBits,Distance=1,DistSlot=DecodeNumber(Inp,&BlockTables.DD); + int DBits; + uint Distance=1,DistSlot=this.DecodeNumber(DD); + if (DistSlot<4) + { + DBits=0; + Distance+=DistSlot; + } + else + { + //DBits=DistSlot/2 - 1; + DBits=(int)(DistSlot/2 - 1); + Distance+=(2 | (DistSlot & 1)) << DBits; + } + + if (DBits>0) + { + if (DBits>=4) + { + if (DBits>4) + { + Distance+=((Inp.getbits()>>(36-DBits))<<4); + Inp.AddBits(DBits-4); + } + //uint LowDist=DecodeNumber(Inp,&BlockTables.LDD); + uint LowDist=this.DecodeNumber(LDD); + Distance+=LowDist; + } + else + { + Distance+=Inp.getbits()>>(32-DBits); + Inp.AddBits(DBits); + } + } + + if (Distance>0x100) + { + Length++; + if (Distance>0x2000) + { + Length++; + if (Distance>0x40000) + Length++; + } + } + + InsertOldDist(Distance); + LastLength=Length; +// if (Fragmented) +// FragWindow.CopyString(Length,Distance,UnpPtr,MaxWinMask); 
+// else + CopyString(Length,Distance); + continue; + } + if (MainSlot==256) + { + UnpackFilter Filter = new UnpackFilter(); + if (!ReadFilter(Filter) || !AddFilter(Filter)) + break; + continue; + } + if (MainSlot==257) + { + if (LastLength!=0) +// if (Fragmented) +// FragWindow.CopyString(LastLength,OldDist[0],UnpPtr,MaxWinMask); +// else + //CopyString(LastLength,OldDist[0]); + CopyString(LastLength,OldDistN(0)); + continue; + } + if (MainSlot<262) + { + //uint DistNum=MainSlot-258; + int DistNum=(int)(MainSlot-258); + //uint Distance=OldDist[DistNum]; + uint Distance=OldDistN(DistNum); + //for (uint I=DistNum;I>0;I--) + for (int I=DistNum;I>0;I--) + //OldDistN[I]=OldDistN(I-1); + SetOldDistN(I, OldDistN(I-1)); + //OldDistN[0]=Distance; + SetOldDistN(0, Distance); + + uint LengthSlot=this.DecodeNumber(RD); + uint Length=SlotToLength(LengthSlot); + LastLength=Length; +// if (Fragmented) +// FragWindow.CopyString(Length,Distance,UnpPtr,MaxWinMask); +// else + CopyString(Length,Distance); + continue; + } + } + UnpWriteBuf(); + } + + private uint ReadFilterData() + { + uint ByteCount=(Inp.fgetbits()>>14)+1; + Inp.AddBits(2); + + uint Data=0; + //for (uint I=0;I>8)<<(I*8); + Inp.AddBits(8); + } + return Data; + } + + private bool ReadFilter(UnpackFilter Filter) + { + if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-16) + if (!UnpReadBuf()) + return false; + + Filter.uBlockStart=ReadFilterData(); + Filter.uBlockLength=ReadFilterData(); + if (Filter.BlockLength>MAX_FILTER_BLOCK_SIZE) + Filter.BlockLength=0; + + //Filter.Type=Inp.fgetbits()>>13; + Filter.Type=(byte)(Inp.fgetbits()>>13); + Inp.faddbits(3); + + if (Filter.Type==(byte)FilterType.FILTER_DELTA) + { + //Filter.Channels=(Inp.fgetbits()>>11)+1; + Filter.Channels=(byte)((Inp.fgetbits()>>11)+1); + Inp.faddbits(5); + } + + return true; + } + + private bool AddFilter(UnpackFilter Filter) + { + if (Filters.Count>=MAX_UNPACK_FILTERS) + { + UnpWriteBuf(); // Write data, apply and flush filters. 
+ if (Filters.Count>=MAX_UNPACK_FILTERS) + InitFilters(); // Still too many filters, prevent excessive memory use. + } + + // If distance to filter start is that large that due to circular dictionary + // mode now it points to old not written yet data, then we set 'NextWindow' + // flag and process this filter only after processing that older data. + Filter.NextWindow=WrPtr!=UnpPtr && ((WrPtr-UnpPtr)&MaxWinMask)<=Filter.BlockStart; + + Filter.uBlockStart=(uint)((Filter.BlockStart+UnpPtr)&MaxWinMask); + Filters.Add(Filter); + return true; + } + + private bool UnpReadBuf() + { + int DataSize=ReadTop-Inp.InAddr; // Data left to process. + if (DataSize<0) + return false; + BlockHeader.BlockSize-=Inp.InAddr-BlockHeader.BlockStart; + if (Inp.InAddr>MAX_SIZE/2) + { + // If we already processed more than half of buffer, let's move + // remaining data into beginning to free more space for new data + // and ensure that calling function does not cross the buffer border + // even if we did not read anything here. Also it ensures that read size + // is not less than CRYPT_BLOCK_SIZE, so we can align it without risk + // to make it zero. + if (DataSize>0) + //memmove(Inp.InBuf,Inp.InBuf+Inp.InAddr,DataSize); + Array.Copy(InBuf, inAddr, InBuf, 0, DataSize); + // TODO: perf + //Buffer.BlockCopy(InBuf, inAddr, InBuf, 0, DataSize); + + Inp.InAddr=0; + ReadTop=DataSize; + } + else + DataSize=ReadTop; + int ReadCode=0; + if (MAX_SIZE!=DataSize) + //ReadCode=UnpIO->UnpRead(Inp.InBuf+DataSize,BitInput.MAX_SIZE-DataSize); + ReadCode = readStream.Read(InBuf, DataSize, MAX_SIZE-DataSize); + if (ReadCode>0) // Can be also -1. + ReadTop+=ReadCode; + ReadBorder=ReadTop-30; + BlockHeader.BlockStart=Inp.InAddr; + if (BlockHeader.BlockSize!=-1) // '-1' means not defined yet. + { + // We may need to quit from main extraction loop and read new block header + // and trees earlier than data in input buffer ends. 
+ ReadBorder=Math.Min(ReadBorder,BlockHeader.BlockStart+BlockHeader.BlockSize-1); + } + return ReadCode!=-1; + } + +//? +// void UnpWriteBuf() +// { +// size_t WrittenBorder=WrPtr; +// size_t FullWriteSize=(UnpPtr-WrittenBorder)&MaxWinMask; +// size_t WriteSizeLeft=FullWriteSize; +// bool NotAllFiltersProcessed=false; +// for (size_t I=0;IType==FilterType.FILTER_NONE) +// continue; +// if (flt->NextWindow) +// { +// // Here we skip filters which have block start in current data range +// // due to address wrap around in circular dictionary, but actually +// // belong to next dictionary block. If such filter start position +// // is included to current write range, then we reset 'NextWindow' flag. +// // In fact we can reset it even without such check, because current +// // implementation seems to guarantee 'NextWindow' flag reset after +// // buffer writing for all existing filters. But let's keep this check +// // just in case. Compressor guarantees that distance between +// // filter block start and filter storing position cannot exceed +// // the dictionary size. So if we covered the filter block start with +// // our write here, we can safely assume that filter is applicable +// // to next block on no further wrap arounds is possible. +// if (((flt->BlockStart-WrPtr)&MaxWinMask)<=FullWriteSize) +// flt->NextWindow=false; +// continue; +// } +// uint BlockStart=flt->BlockStart; +// uint BlockLength=flt->BlockLength; +// if (((BlockStart-WrittenBorder)&MaxWinMask)0) // We set it to 0 also for invalid filters. 
+// { +// uint BlockEnd=(BlockStart+BlockLength)&MaxWinMask; +// +// FilterSrcMemory.Alloc(BlockLength); +// byte *Mem=&FilterSrcMemory[0]; +// if (BlockStartUnpWrite(OutMem,BlockLength); +// +// UnpSomeRead=true; +// WrittenFileSize+=BlockLength; +// WrittenBorder=BlockEnd; +// WriteSizeLeft=(UnpPtr-WrittenBorder)&MaxWinMask; +// } +// } +// else +// { +// // Current filter intersects the window write border, so we adjust +// // the window border to process this filter next time, not now. +// WrPtr=WrittenBorder; +// +// // Since Filter start position can only increase, we quit processing +// // all following filters for this data block and reset 'NextWindow' +// // flag for them. +// for (size_t J=I;JType!=FilterType.FILTER_NONE) +// flt->NextWindow=false; +// } +// +// // Do not write data left after current filter now. +// NotAllFiltersProcessed=true; +// break; +// } +// } +// } +// +// // Remove processed filters from queue. +// size_t EmptyCount=0; +// for (size_t I=0;I0) +// Filters[I-EmptyCount]=Filters[I]; +// if (Filters[I].Type==FilterType.FILTER_NONE) +// EmptyCount++; +// } +// if (EmptyCount>0) +// Filters.Alloc(Filters.Size()-EmptyCount); +// +// if (!NotAllFiltersProcessed) // Only if all filters are processed. +// { +// // Write data left after last filter. +// UnpWriteArea(WrittenBorder,UnpPtr); +// WrPtr=UnpPtr; +// } +// +// // We prefer to write data in blocks not exceeding UNPACK_MAX_WRITE +// // instead of potentially huge MaxWinSize blocks. It also allows us +// // to keep the size of Filters array reasonable. +// WriteBorder=(UnpPtr+Min(MaxWinSize,UNPACK_MAX_WRITE))&MaxWinMask; +// +// // Choose the nearest among WriteBorder and WrPtr actual written border. +// // If border is equal to UnpPtr, it means that we have MaxWinSize data ahead. 
+// if (WriteBorder==UnpPtr || +// WrPtr!=UnpPtr && ((WrPtr-UnpPtr)&MaxWinMask)<((WriteBorder-UnpPtr)&MaxWinMask)) +// WriteBorder=WrPtr; +// } + + +// unused + //x byte* ApplyFilter(byte *Data,uint DataSize,UnpackFilter *Flt) +// byte[] ApplyFilter(byte []Data, uint DataSize, UnpackFilter Flt) +// { +// //x byte *SrcData=Data; +// byte []SrcData=Data; +// switch(Flt.Type) +// { +// case (byte)FilterType.FILTER_E8: +// case (byte)FilterType.FILTER_E8E9: +// { +// uint FileOffset=(uint)WrittenFileSize; +// +// const uint FileSize=0x1000000; +// byte CmpByte2=Flt.Type==(byte)FilterType.FILTER_E8E9 ? (byte)0xe9 : (byte)0xe8; +// // DataSize is unsigned, so we use "CurPos+4" and not "DataSize-4" +// // to avoid overflow for DataSize<4. +// for (uint CurPos=0;CurPos+4=0 +// RawPut4(Addr+FileSize,Data); +// } +// else +// if (((Addr-FileSize) & 0x80000000)!=0) // Addr>8); +// D[2]=(byte)(Offset>>16); +// } +// } +// } +// return SrcData; +// case (byte)FilterType.FILTER_DELTA: +// { +// // Unlike RAR3, we do not need to reject excessive channel +// // values here, since RAR5 uses only 5 bits to store channel. +// uint Channels=Flt->Channels,SrcPos=0; +// +// FilterDstMemory.Alloc(DataSize); +// byte *DstData=&FilterDstMemory[0]; +// +// // Bytes from same channels are grouped to continual data blocks, +// // so we need to place them back to their interleaving positions. 
+// for (uint CurChannel=0;CurChannel0) +//// { +//// size_t BlockSize=FragWindow.GetBlockSize(StartPtr,SizeToWrite); +//// UnpWriteData(&FragWindow[StartPtr],BlockSize); +//// SizeToWrite-=BlockSize; +//// StartPtr=(StartPtr+BlockSize) & MaxWinMask; +//// } +//// } +//// else +// if (EndPtr=DestUnpSize) +// return; +// size_t WriteSize=Size; +// long LeftToWrite=DestUnpSize-WrittenFileSize; +// if ((long)WriteSize>LeftToWrite) +// WriteSize=(size_t)LeftToWrite; +// UnpIO->UnpWrite(Data,WriteSize); +// WrittenFileSize+=Size; +// } + + private void UnpInitData50(bool Solid) + { + if (!Solid) + TablesRead5=false; + } + + private bool ReadBlockHeader() + { + Header.HeaderSize=0; + + if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-7) + if (!UnpReadBuf()) + return false; + //Inp.faddbits((8-Inp.InBit)&7); + Inp.faddbits((uint)((8-Inp.InBit)&7)); + + byte BlockFlags=(byte)(Inp.fgetbits()>>8); + Inp.faddbits(8); + //uint ByteCount=((BlockFlags>>3)&3)+1; // Block size byte count. + uint ByteCount=(uint)(((BlockFlags>>3)&3)+1); // Block size byte count. + + if (ByteCount==4) + return false; + + //Header.HeaderSize=2+ByteCount; + Header.HeaderSize=(int)(2+ByteCount); + + Header.BlockBitSize=(BlockFlags&7)+1; + + byte SavedCheckSum=(byte)(Inp.fgetbits()>>8); + Inp.faddbits(8); + + int BlockSize=0; + //for (uint I=0;I>8)<<(I*8); + BlockSize+=(int)(Inp.fgetbits()>>8)<<(I*8); + Inp.AddBits(8); + } + + Header.BlockSize=BlockSize; + byte CheckSum=(byte)(0x5a^BlockFlags^BlockSize^(BlockSize>>8)^(BlockSize>>16)); + if (CheckSum!=SavedCheckSum) + return false; + + Header.BlockStart=Inp.InAddr; + ReadBorder=Math.Min(ReadBorder,Header.BlockStart+Header.BlockSize-1); + + Header.LastBlockInFile=(BlockFlags & 0x40)!=0; + Header.TablePresent=(BlockFlags & 0x80)!=0; + return true; + } + +//? 
+// bool ReadTables(BitInput Inp, ref UnpackBlockHeader Header, ref UnpackBlockTables Tables) +// { +// if (!Header.TablePresent) +// return true; +// +// if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-25) +// if (!UnpReadBuf()) +// return false; +// +// byte BitLength[BC]; +// for (uint I=0;I> 12); +// Inp.faddbits(4); +// if (Length==15) +// { +// uint ZeroCount=(byte)(Inp.fgetbits() >> 12); +// Inp.faddbits(4); +// if (ZeroCount==0) +// BitLength[I]=15; +// else +// { +// ZeroCount+=2; +// while (ZeroCount-- > 0 && IReadTop-5) +// if (!UnpReadBuf()) +// return false; +// uint Number=DecodeNumber(Inp,&Tables.BD); +// if (Number<16) +// { +// Table[I]=Number; +// I++; +// } +// else +// if (Number<18) +// { +// uint N; +// if (Number==16) +// { +// N=(Inp.fgetbits() >> 13)+3; +// Inp.faddbits(3); +// } +// else +// { +// N=(Inp.fgetbits() >> 9)+11; +// Inp.faddbits(7); +// } +// if (I==0) +// { +// // We cannot have "repeat previous" code at the first position. +// // Multiple such codes would shift Inp position without changing I, +// // which can lead to reading beyond of Inp boundary in mutithreading +// // mode, where Inp.ExternalBuffer disables bounds check and we just +// // reserve a lot of buffer space to not need such check normally. +// return false; +// } +// else +// while (N-- > 0 && I> 13)+3; +// Inp.faddbits(3); +// } +// else +// { +// N=(Inp.fgetbits() >> 9)+11; +// Inp.faddbits(7); +// } +// while (N-- > 0 && IReadTop) +// return false; +// MakeDecodeTables(&Table[0],&Tables.LD,NC); +// MakeDecodeTables(&Table[NC],&Tables.DD,DC); +// MakeDecodeTables(&Table[NC+DC],&Tables.LDD,LDC); +// MakeDecodeTables(&Table[NC+DC+LDC],&Tables.RD,RC); +// return true; +// } + +//? 
+// void InitFilters() +// { +// Filters.SoftReset(); +// } + + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackFilter.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackFilter.cs new file mode 100644 index 0000000000..66d5d88751 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackFilter.cs @@ -0,0 +1,33 @@ +using SharpCompress.Compressors.Rar.VM; + +namespace SharpCompress.Compressors.Rar.UnpackV1 +{ + internal class UnpackFilter + { + public byte Type; + public byte Channels; + + internal UnpackFilter() + { + Program = new VMPreparedProgram(); + } + + // TODO uint + internal uint uBlockStart { get { return (uint)BlockStart; } set { BlockStart = (int)value; } } + internal uint uBlockLength { get { return (uint)BlockLength; } set { BlockLength = (int)value; } } + + internal int BlockStart { get; set; } + + internal int BlockLength { get; set; } + + internal int ExecCount { get; set; } + + internal bool NextWindow { get; set; } + + // position of parent filter in Filters array used as prototype for filter + // in PrgStack array. Not defined for filters in Filters array. 
+ internal int ParentFilter { get; set; } + + internal VMPreparedProgram Program { get; set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackInline.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackInline.cs new file mode 100644 index 0000000000..1bb2893d01 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackInline.cs @@ -0,0 +1,31 @@ + +namespace SharpCompress.Compressors.Rar.UnpackV1 +{ + internal partial class Unpack + { + private uint SlotToLength(uint Slot) + { + //uint LBits,Length=2; + int LBits; + uint Length=2; + if (Slot<8) + { + LBits=0; + Length+=Slot; + } + else + { + //LBits=Slot/4-1; + LBits=(int)(Slot/4-1); + Length+=(4 | (Slot & 3)) << LBits; + } + + if (LBits>0) + { + Length+=getbits()>>(16-LBits); + AddBits(LBits); + } + return Length; + } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackUtility.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackUtility.cs new file mode 100644 index 0000000000..46261496f7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV1/UnpackUtility.cs @@ -0,0 +1,220 @@ +using SharpCompress.Compressors.Rar.VM; + +namespace SharpCompress.Compressors.Rar.UnpackV1 +{ + internal static class UnpackUtility + { +//!!! 
TODO rename methods + internal static uint DecodeNumber(this BitInput input, Decode.Decode dec) { + return (uint)input.decodeNumber(dec); + } + + internal static int decodeNumber(this BitInput input, Decode.Decode dec) + { + int bits; + long bitField = input.GetBits() & 0xfffe; + + // if (bitField < dec.getDecodeLen()[8]) { + // if (bitField < dec.getDecodeLen()[4]) { + // if (bitField < dec.getDecodeLen()[2]) { + // if (bitField < dec.getDecodeLen()[1]) { + // bits = 1; + // } else { + // bits = 2; + // } + // } else { + // if (bitField < dec.getDecodeLen()[3]) { + // bits = 3; + // } else { + // bits = 4; + // } + // } + // } else { + // if (bitField < dec.getDecodeLen()[6]) { + // if (bitField < dec.getDecodeLen()[5]) + // bits = 5; + // else + // bits = 6; + // } else { + // if (bitField < dec.getDecodeLen()[7]) { + // bits = 7; + // } else { + // bits = 8; + // } + // } + // } + // } else { + // if (bitField < dec.getDecodeLen()[12]) { + // if (bitField < dec.getDecodeLen()[10]) + // if (bitField < dec.getDecodeLen()[9]) + // bits = 9; + // else + // bits = 10; + // else if (bitField < dec.getDecodeLen()[11]) + // bits = 11; + // else + // bits = 12; + // } else { + // if (bitField < dec.getDecodeLen()[14]) { + // if (bitField < dec.getDecodeLen()[13]) { + // bits = 13; + // } else { + // bits = 14; + // } + // } else { + // bits = 15; + // } + // } + // } + // addbits(bits); + // int N = dec.getDecodePos()[bits] + // + (((int) bitField - dec.getDecodeLen()[bits - 1]) >>> (16 - bits)); + // if (N >= dec.getMaxNum()) { + // N = 0; + // } + // return (dec.getDecodeNum()[N]); + int[] decodeLen = dec.DecodeLen; + if (bitField < decodeLen[8]) + { + if (bitField < decodeLen[4]) + { + if (bitField < decodeLen[2]) + { + if (bitField < decodeLen[1]) + { + bits = 1; + } + else + { + bits = 2; + } + } + else + { + if (bitField < decodeLen[3]) + { + bits = 3; + } + else + { + bits = 4; + } + } + } + else + { + if (bitField < decodeLen[6]) + { + if (bitField < 
decodeLen[5]) + { + bits = 5; + } + else + { + bits = 6; + } + } + else + { + if (bitField < decodeLen[7]) + { + bits = 7; + } + else + { + bits = 8; + } + } + } + } + else + { + if (bitField < decodeLen[12]) + { + if (bitField < decodeLen[10]) + { + if (bitField < decodeLen[9]) + { + bits = 9; + } + else + { + bits = 10; + } + } + else if (bitField < decodeLen[11]) + { + bits = 11; + } + else + { + bits = 12; + } + } + else + { + if (bitField < decodeLen[14]) + { + if (bitField < decodeLen[13]) + { + bits = 13; + } + else + { + bits = 14; + } + } + else + { + bits = 15; + } + } + } + input.AddBits(bits); + int N = dec.DecodePos[bits] + (Utility.URShift(((int)bitField - decodeLen[bits - 1]), (16 - bits))); + if (N >= dec.MaxNum) + { + N = 0; + } + return (dec.DecodeNum[N]); + } + + internal static void makeDecodeTables(byte[] lenTab, int offset, Decode.Decode dec, int size) + { + int[] lenCount = new int[16]; + int[] tmpPos = new int[16]; + int i; + long M, N; + + Utility.Fill(lenCount, 0); // memset(LenCount,0,sizeof(LenCount)); + + Utility.Fill(dec.DecodeNum, 0); // memset(Dec->DecodeNum,0,Size*sizeof(*Dec->DecodeNum)); + + for (i = 0; i < size; i++) + { + lenCount[lenTab[offset + i] & 0xF]++; + } + lenCount[0] = 0; + for (tmpPos[0] = 0, dec.DecodePos[0] = 0, dec.DecodeLen[0] = 0, N = 0, i = 1; i < 16; i++) + { + N = 2 * (N + lenCount[i]); + M = N << (15 - i); + if (M > 0xFFFF) + { + M = 0xFFFF; + } + dec.DecodeLen[i] = (int)M; + tmpPos[i] = dec.DecodePos[i] = dec.DecodePos[i - 1] + lenCount[i - 1]; + } + + for (i = 0; i < size; i++) + { + if (lenTab[offset + i] != 0) + { + dec.DecodeNum[tmpPos[lenTab[offset + i] & 0xF]++] = i; + } + } + dec.MaxNum = size; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_cpp.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_cpp.cs new file mode 100644 index 0000000000..ff7b70d7f0 --- /dev/null +++ 
internal partial class BitInput
{
    /// <summary>Size of the input buffer in bytes.</summary>
    public const int MAX_SIZE = 0x8000;

    public int InAddr;          // current byte position in the buffer
    public int InBit;           // current bit position within the current byte

    public bool ExternalBuffer; // true when InBuf is caller-owned

    public byte[] InBuf;        // input buffer (allocated here or set externally)

    /// <summary>
    /// Creates the bit reader. When <paramref name="AllocBuffer"/> is true an
    /// internal buffer of MAX_SIZE + 3 bytes is allocated: getbits32 reads up
    /// to InBuf[InAddr + 4], so the 3 spare bytes prevent an out-of-range
    /// access when only the last byte of the buffer holds real data.
    /// </summary>
    public BitInput(bool AllocBuffer)
    {
        ExternalBuffer = false;
        InBuf = AllocBuffer ? new byte[MAX_SIZE + 3] : null;
    }

    /// <summary>Resets the read position to the start of the buffer.</summary>
    public void InitBitInput()
    {
        InAddr = 0;
        InBit = 0;
    }

    /// <summary>Advances the read position by the given number of bits.</summary>
    public void addbits(uint _Bits)
    {
        int total = checked((int)_Bits) + InBit;
        InAddr += total >> 3;
        InBit = total & 7;
    }

    /// <summary>
    /// Returns the next 16 bits at (InAddr, InBit); the bit at the current
    /// position becomes the most significant bit of the result. Does not
    /// advance the position.
    /// </summary>
    public uint getbits()
    {
        uint window = ((uint)InBuf[InAddr] << 16)
                    | ((uint)InBuf[InAddr + 1] << 8)
                    | InBuf[InAddr + 2];
        return (window >> (8 - InBit)) & 0xffff;
    }

    /// <summary>
    /// Returns the next 32 bits at (InAddr, InBit). May touch
    /// InBuf[InAddr + 4], which the constructor's padding accounts for.
    /// Does not advance the position.
    /// </summary>
    public uint getbits32()
    {
        uint window = ((uint)InBuf[InAddr] << 24)
                    | ((uint)InBuf[InAddr + 1] << 16)
                    | ((uint)InBuf[InAddr + 2] << 8)
                    | InBuf[InAddr + 3];
        window <<= InBit;
        window |= (uint)InBuf[InAddr + 4] >> (8 - InBit);
        return window;
    }

    /// <summary>Function-wrapped addbits (kept from the C++ original, where it saved code size).</summary>
    public void faddbits(uint Bits)
    {
        addbits(Bits);
    }

    /// <summary>Function-wrapped getbits (kept from the C++ original, where it saved code size).</summary>
    public uint fgetbits()
    {
        return getbits();
    }

    /// <summary>
    /// Returns true when reading <paramref name="IncPtr"/> more bytes would
    /// run past MAX_SIZE (i.e. the buffer would overflow).
    /// </summary>
    private bool Overflow(uint IncPtr)
    {
        return InAddr + IncPtr >= MAX_SIZE;
    }

    /// <summary>Switches the reader to a caller-owned buffer.</summary>
    private void SetExternalBuffer(byte[] Buf)
    {
        InBuf = Buf;
        ExternalBuffer = true;
    }
}
+ // sharpcompress: don't need this, freshly allocated above + //Utility.Memset(NewMem,0,Size); + + Mem[BlockNum]=NewMem; + TotalSize+=Size; + MemSize[BlockNum]=TotalSize; + BlockNum++; + } + if (TotalSize 0) + { + this[UnpPtr]=this[SrcPtr++ & MaxWinMask]; + // We need to have masked UnpPtr after quit from loop, so it must not + // be replaced with '(*this)[UnpPtr++ & MaxWinMask]' + UnpPtr=(UnpPtr+1) & MaxWinMask; + } +} + + +public void CopyData(byte[] Dest, size_t destOffset, size_t WinPos,size_t Size) +{ + for (size_t I=0;I DestUnpSize; } + + public int Char + { + get + { + // TODO: coderb: not sure where the "MAXSIZE-30" comes from, ported from V1 code + if (InAddr > MAX_SIZE - 30) + { + UnpReadBuf(); + } + return InBuf[InAddr++]; + } + } + + public int PpmEscChar { get => PPMEscChar; set => PPMEscChar = value; } + + public static byte[] EnsureCapacity(byte[] array, int length) { + return array.Length < length ? new byte[length] : array; + } + + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.rawint_hpp.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.rawint_hpp.cs new file mode 100644 index 0000000000..3a34f9d0f3 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.rawint_hpp.cs @@ -0,0 +1,126 @@ +#if !Rar2017_64bit +using nint = System.Int32; +using nuint = System.UInt32; +using size_t = System.UInt32; +#else +using nint = System.Int64; +using nuint = System.UInt64; +using size_t = System.UInt64; +#endif +using int64 = System.Int64; +using uint32 = System.UInt32; + +using System; +using System.Collections.Generic; +using System.Text; + +namespace SharpCompress.Compressors.Rar.UnpackV2017 +{ + internal partial class Unpack + { +//#define rotls(x,n,xsize) (((x)<<(n)) | ((x)>>(xsize-(n)))) +//#define rotrs(x,n,xsize) (((x)>>(n)) | ((x)<<(xsize-(n)))) +//#define rotl32(x,n) rotls(x,n,32) +//#define rotr32(x,n) rotrs(x,n,32) +// +//inline uint RawGet2(const 
void *Data) +//{ +// byte *D=(byte *)Data; +// return D[0]+(D[1]<<8); +//} + + private uint32 RawGet4(byte[] D, int offset) +{ + return (uint)(D[offset]+(D[offset+1]<<8)+(D[offset+2]<<16)+(D[offset+3]<<24)); +} + +//inline uint64 RawGet8(const void *Data) +//{ +//#if defined(BIG_ENDIAN) || !defined(ALLOW_MISALIGNED) +// byte *D=(byte *)Data; +// return INT32TO64(RawGet4(D+4),RawGet4(D)); +//#else +// return *(uint64 *)Data; +//#endif +//} +// +// +//inline void RawPut2(uint Field,void *Data) +//{ +// byte *D=(byte *)Data; +// D[0]=(byte)(Field); +// D[1]=(byte)(Field>>8); +//} + + private void RawPut4(uint32 Field,byte[] D, int offset) +{ + D[offset]=(byte)(Field); + D[offset+1]=(byte)(Field>>8); + D[offset+2]=(byte)(Field>>16); + D[offset+3]=(byte)(Field>>24); +} + +//inline void RawPut8(uint64 Field,void *Data) +//{ +//#if defined(BIG_ENDIAN) || !defined(ALLOW_MISALIGNED) +// byte *D=(byte *)Data; +// D[0]=(byte)(Field); +// D[1]=(byte)(Field>>8); +// D[2]=(byte)(Field>>16); +// D[3]=(byte)(Field>>24); +// D[4]=(byte)(Field>>32); +// D[5]=(byte)(Field>>40); +// D[6]=(byte)(Field>>48); +// D[7]=(byte)(Field>>56); +//#else +// *(uint64 *)Data=Field; +//#endif +//} + + +//#if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED) +//#define USE_MEM_BYTESWAP +//#endif + +// Load 4 big endian bytes from memory and return uint32. +//inline uint32 RawGetBE4(const byte *m) +//{ +//#if defined(USE_MEM_BYTESWAP) && defined(_MSC_VER) +// return _byteswap_ulong(*(uint32 *)m); +//#elif defined(USE_MEM_BYTESWAP) && (__GNUC__ > 3) && (__GNUC_MINOR__ > 2) +// return __builtin_bswap32(*(uint32 *)m); +//#else +// return uint32(m[0]<<24) | uint32(m[1]<<16) | uint32(m[2]<<8) | m[3]; +//#endif +//} + + +// Save integer to memory as big endian. 
+//inline void RawPutBE4(uint32 i,byte *mem) +//{ +//#if defined(USE_MEM_BYTESWAP) && defined(_MSC_VER) +// *(uint32*)mem = _byteswap_ulong(i); +//#elif defined(USE_MEM_BYTESWAP) && (__GNUC__ > 3) && (__GNUC_MINOR__ > 2) +// *(uint32*)mem = __builtin_bswap32(i); +//#else +// mem[0]=byte(i>>24); +// mem[1]=byte(i>>16); +// mem[2]=byte(i>>8); +// mem[3]=byte(i); +//#endif +//} + + +//inline uint32 ByteSwap32(uint32 i) +//{ +//#ifdef _MSC_VER +// return _byteswap_ulong(i); +//#elif (__GNUC__ > 3) && (__GNUC_MINOR__ > 2) +// return __builtin_bswap32(i); +//#else +// return (rotl32(i,24)&0xFF00FF00)|(rotl32(i,8)&0x00FF00FF); +//#endif +//} + + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack15_cpp.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack15_cpp.cs new file mode 100644 index 0000000000..2b71751e8c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack15_cpp.cs @@ -0,0 +1,501 @@ +using static SharpCompress.Compressors.Rar.UnpackV2017.Unpack.Unpack15Local; + +namespace SharpCompress.Compressors.Rar.UnpackV2017 +{ + internal partial class Unpack + { + private const int STARTL1 =2; + + private static readonly uint[] DecL1={0x8000,0xa000,0xc000,0xd000,0xe000,0xea00, + 0xee00,0xf000,0xf200,0xf200,0xffff}; + + private static readonly uint[] PosL1={0,0,0,2,3,5,7,11,16,20,24,32,32}; + + private const int STARTL2 =3; + + private static readonly uint[] DecL2={0xa000,0xc000,0xd000,0xe000,0xea00,0xee00, + 0xf000,0xf200,0xf240,0xffff}; + + private static readonly uint[] PosL2={0,0,0,0,5,7,9,13,18,22,26,34,36}; + + private const int STARTHF0 =4; + + private static readonly uint[] DecHf0={0x8000,0xc000,0xe000,0xf200,0xf200,0xf200, + 0xf200,0xf200,0xffff}; + + private static readonly uint[] PosHf0={0,0,0,0,0,8,16,24,33,33,33,33,33}; + + private const int STARTHF1 =5; + + private static readonly uint[] DecHf1={0x2000,0xc000,0xe000,0xf000,0xf200,0xf200, + 
0xf7e0,0xffff}; + + private static readonly uint[] PosHf1={0,0,0,0,0,0,4,44,60,76,80,80,127}; + + private const int STARTHF2 =5; + + private static readonly uint[] DecHf2={0x1000,0x2400,0x8000,0xc000,0xfa00,0xffff, + 0xffff,0xffff}; + + private static readonly uint[] PosHf2={0,0,0,0,0,0,2,7,53,117,233,0,0}; + + private const int STARTHF3 =6; + + private static readonly uint[] DecHf3={0x800,0x2400,0xee00,0xfe80,0xffff,0xffff, + 0xffff}; + + private static readonly uint[] PosHf3={0,0,0,0,0,0,0,2,16,218,251,0,0}; + + private const int STARTHF4 =8; + private static readonly uint[] DecHf4={0xff00,0xffff,0xffff,0xffff,0xffff,0xffff}; + private static readonly uint[] PosHf4={0,0,0,0,0,0,0,0,0,255,0,0,0}; + + private void Unpack15(bool Solid) +{ + UnpInitData(Solid); + UnpInitData15(Solid); + UnpReadBuf(); + if (!Solid) + { + InitHuff(); + UnpPtr=0; + } + else + UnpPtr=WrPtr; + --DestUnpSize; + if (DestUnpSize>=0) + { + GetFlagsBuf(); + FlagsCnt=8; + } + + while (DestUnpSize>=0) + { + UnpPtr&=MaxWinMask; + + if (Inp.InAddr>ReadTop-30 && !UnpReadBuf()) + break; + if (((WrPtr-UnpPtr) & MaxWinMask)<270 && WrPtr!=UnpPtr) + UnpWriteBuf20(); + if (StMode != 0) + { + HuffDecode(); + continue; + } + + if (--FlagsCnt < 0) + { + GetFlagsBuf(); + FlagsCnt=7; + } + + if ((FlagBuf & 0x80) != 0) + { + FlagBuf<<=1; + if (Nlzb > Nhfb) + LongLZ(); + else + HuffDecode(); + } + else + { + FlagBuf<<=1; + if (--FlagsCnt < 0) + { + GetFlagsBuf(); + FlagsCnt=7; + } + if ((FlagBuf & 0x80) != 0) + { + FlagBuf<<=1; + if (Nlzb > Nhfb) + HuffDecode(); + else + LongLZ(); + } + else + { + FlagBuf<<=1; + ShortLZ(); + } + } + } + UnpWriteBuf20(); +} + + +//#define GetShortLen1(pos) ((pos)==1 ? Buf60+3:ShortLen1[pos]) + private uint GetShortLen1(uint pos) { return ((pos)==1 ? (uint)(Buf60+3):ShortLen1[pos]); } +//#define GetShortLen2(pos) ((pos)==3 ? Buf60+3:ShortLen2[pos]) + private uint GetShortLen2(uint pos) { return ((pos)==3 ? 
(uint)(Buf60+3):ShortLen2[pos]); } + +internal static class Unpack15Local { + public static readonly uint[] ShortLen1={1,3,4,4,5,6,7,8,8,4,4,5,6,6,4,0}; + public static readonly uint[] ShortXor1={0,0xa0,0xd0,0xe0,0xf0,0xf8,0xfc,0xfe, + 0xff,0xc0,0x80,0x90,0x98,0x9c,0xb0}; + public static readonly uint[] ShortLen2={2,3,3,3,4,4,5,6,6,4,4,5,6,6,4,0}; + public static readonly uint[] ShortXor2={0,0x40,0x60,0xa0,0xd0,0xe0,0xf0,0xf8, + 0xfc,0xc0,0x80,0x90,0x98,0x9c,0xb0}; +} + + private void ShortLZ() +{ + uint Length,SaveLength; + uint LastDistance; + uint Distance; + int DistancePlace; + NumHuf=0; + + uint BitField=Inp.fgetbits(); + if (LCount==2) + { + Inp.faddbits(1); + if (BitField >= 0x8000) + { + CopyString15((uint)LastDist,LastLength); + return; + } + BitField <<= 1; + LCount=0; + } + + BitField>>=8; + +// not thread safe, replaced by GetShortLen1 and GetShortLen2 macro +// ShortLen1[1]=ShortLen2[3]=Buf60+3; + + if (AvrLn1<37) + { + for (Length=0;;Length++) + if (((BitField^ShortXor1[Length]) & (~(0xff>>(int)GetShortLen1(Length))))==0) + break; + Inp.faddbits(GetShortLen1(Length)); + } + else + { + for (Length=0;;Length++) + if (((BitField^ShortXor2[Length]) & (~(0xff>>(int)GetShortLen2(Length))))==0) + break; + Inp.faddbits(GetShortLen2(Length)); + } + + if (Length >= 9) + { + if (Length == 9) + { + LCount++; + CopyString15((uint)LastDist,LastLength); + return; + } + if (Length == 14) + { + LCount=0; + Length=DecodeNum(Inp.fgetbits(),STARTL2,DecL2,PosL2)+5; + Distance=(Inp.fgetbits()>>1) | 0x8000; + Inp.faddbits(15); + LastLength=Length; + LastDist=Distance; + CopyString15(Distance,Length); + return; + } + + LCount=0; + SaveLength=Length; + Distance=OldDist[(OldDistPtr-(Length-9)) & 3]; + Length=DecodeNum(Inp.fgetbits(),STARTL1,DecL1,PosL1)+2; + if (Length==0x101 && SaveLength==10) + { + Buf60 ^= 1; + return; + } + if (Distance > 256) + Length++; + if (Distance >= MaxDist3) + Length++; + + OldDist[OldDistPtr++]=Distance; + OldDistPtr = OldDistPtr & 3; + 
LastLength=Length; + LastDist=Distance; + CopyString15(Distance,Length); + return; + } + + LCount=0; + AvrLn1 += Length; + AvrLn1 -= AvrLn1 >> 4; + + DistancePlace=(int)(DecodeNum(Inp.fgetbits(),STARTHF2,DecHf2,PosHf2) & 0xff); + Distance=ChSetA[DistancePlace]; + if (--DistancePlace != -1) + { + LastDistance=ChSetA[DistancePlace]; + ChSetA[DistancePlace+1]=(ushort)LastDistance; + ChSetA[DistancePlace]=(ushort)Distance; + } + Length+=2; + OldDist[OldDistPtr++] = ++Distance; + OldDistPtr = OldDistPtr & 3; + LastLength=Length; + LastDist=Distance; + CopyString15(Distance,Length); +} + + private void LongLZ() +{ + uint Length; + uint Distance; + uint DistancePlace,NewDistancePlace; + uint OldAvr2,OldAvr3; + + NumHuf=0; + Nlzb+=16; + if (Nlzb > 0xff) + { + Nlzb=0x90; + Nhfb >>= 1; + } + OldAvr2=AvrLn2; + + uint BitField=Inp.fgetbits(); + if (AvrLn2 >= 122) + Length=DecodeNum(BitField,STARTL2,DecL2,PosL2); + else + if (AvrLn2 >= 64) + Length=DecodeNum(BitField,STARTL1,DecL1,PosL1); + else + if (BitField < 0x100) + { + Length=BitField; + Inp.faddbits(16); + } + else + { + for (Length=0;((BitField<<(int)Length)&0x8000)==0;Length++) + ; + Inp.faddbits(Length+1); + } + + AvrLn2 += Length; + AvrLn2 -= AvrLn2 >> 5; + + BitField=Inp.fgetbits(); + if (AvrPlcB > 0x28ff) + DistancePlace=DecodeNum(BitField,STARTHF2,DecHf2,PosHf2); + else + if (AvrPlcB > 0x6ff) + DistancePlace=DecodeNum(BitField,STARTHF1,DecHf1,PosHf1); + else + DistancePlace=DecodeNum(BitField,STARTHF0,DecHf0,PosHf0); + + AvrPlcB += DistancePlace; + AvrPlcB -= AvrPlcB >> 8; + while (true) + { + Distance = ChSetB[DistancePlace & 0xff]; + NewDistancePlace = NToPlB[Distance++ & 0xff]++; + if ((Distance & 0xff) != 0) + CorrHuff(ChSetB,NToPlB); + else + break; + } + + ChSetB[DistancePlace & 0xff]=ChSetB[NewDistancePlace]; + ChSetB[NewDistancePlace]=(ushort)Distance; + + Distance=((Distance & 0xff00) | (Inp.fgetbits() >> 8)) >> 1; + Inp.faddbits(7); + + OldAvr3=AvrLn3; + if (Length!=1 && Length!=4) + if (Length==0 && 
Distance <= MaxDist3) + { + AvrLn3++; + AvrLn3 -= AvrLn3 >> 8; + } + else + if (AvrLn3 > 0) + AvrLn3--; + Length+=3; + if (Distance >= MaxDist3) + Length++; + if (Distance <= 256) + Length+=8; + if (OldAvr3 > 0xb0 || AvrPlc >= 0x2a00 && OldAvr2 < 0x40) + MaxDist3=0x7f00; + else + MaxDist3=0x2001; + OldDist[OldDistPtr++]=Distance; + OldDistPtr = OldDistPtr & 3; + LastLength=Length; + LastDist=Distance; + CopyString15(Distance,Length); +} + + private void HuffDecode() +{ + uint CurByte,NewBytePlace; + uint Length; + uint Distance; + int BytePlace; + + uint BitField=Inp.fgetbits(); + + if (AvrPlc > 0x75ff) + BytePlace=(int)DecodeNum(BitField,STARTHF4,DecHf4,PosHf4); + else + if (AvrPlc > 0x5dff) + BytePlace=(int)DecodeNum(BitField,STARTHF3,DecHf3,PosHf3); + else + if (AvrPlc > 0x35ff) + BytePlace=(int)DecodeNum(BitField,STARTHF2,DecHf2,PosHf2); + else + if (AvrPlc > 0x0dff) + BytePlace=(int)DecodeNum(BitField,STARTHF1,DecHf1,PosHf1); + else + BytePlace=(int)DecodeNum(BitField,STARTHF0,DecHf0,PosHf0); + BytePlace&=0xff; + if (StMode != 0) + { + if (BytePlace==0 && BitField > 0xfff) + BytePlace=0x100; + if (--BytePlace==-1) + { + BitField=Inp.fgetbits(); + Inp.faddbits(1); + if ((BitField & 0x8000) != 0) + { + NumHuf=StMode=0; + return; + } + else + { + Length = (BitField & 0x4000) != 0 ? 
4U : 3; + Inp.faddbits(1); + Distance=DecodeNum(Inp.fgetbits(),STARTHF2,DecHf2,PosHf2); + Distance = (Distance << 5) | (Inp.fgetbits() >> 11); + Inp.faddbits(5); + CopyString15(Distance,Length); + return; + } + } + } + else + if (NumHuf++ >= 16 && FlagsCnt==0) + StMode=1; + AvrPlc += (uint)BytePlace; + AvrPlc -= AvrPlc >> 8; + Nhfb+=16; + if (Nhfb > 0xff) + { + Nhfb=0x90; + Nlzb >>= 1; + } + + Window[UnpPtr++]=(byte)(ChSet[BytePlace]>>8); + --DestUnpSize; + + while (true) + { + CurByte=ChSet[BytePlace]; + NewBytePlace=NToPl[CurByte++ & 0xff]++; + if ((CurByte & 0xff) > 0xa1) + CorrHuff(ChSet,NToPl); + else + break; + } + + ChSet[BytePlace]=ChSet[NewBytePlace]; + ChSet[NewBytePlace]=(ushort)CurByte; +} + + private void GetFlagsBuf() +{ + uint Flags,NewFlagsPlace; + uint FlagsPlace=DecodeNum(Inp.fgetbits(),STARTHF2,DecHf2,PosHf2); + + // Our Huffman table stores 257 items and needs all them in other parts + // of code such as when StMode is on, so the first item is control item. + // While normally we do not use the last item to code the flags byte here, + // we need to check for value 256 when unpacking in case we unpack + // a corrupt archive. 
    /// <summary>
    /// Resets RAR 1.5 decoder state. The adaptive averages and mode counters
    /// are only cleared for non-solid archives; solid archives carry them
    /// across files.
    /// </summary>
    private void UnpInitData15(bool Solid)
{
  if (!Solid)
  {
    AvrPlcB=AvrLn1=AvrLn2=AvrLn3=0;
    NumHuf=Buf60=0;
    AvrPlc=0x3500;
    MaxDist3=0x2001;
    Nhfb=Nlzb=0x80;
  }
  // Per-file state is always reset, solid or not.
  FlagsCnt=0;
  FlagBuf=0;
  StMode=0;
  LCount=0;
  ReadTop=0;
}

    /// <summary>
    /// Initializes the adaptive Huffman character sets and the
    /// number-to-place counters used by the RAR 1.5 decoder.
    /// </summary>
    private void InitHuff()
{
  for (uint I=0;I<256;I++)
  {
    // High byte holds the symbol value; low byte is the adaptive counter.
    ChSet[I]=ChSetB[I]=(ushort)(I<<8);
    ChSetA[I]=(ushort)I;
    // ChSetC starts with symbols in descending order: ((-I) & 0xff) << 8.
    ChSetC[I]=(ushort)(((~I+1) & 0xff)<<8);
  }
  Utility.Memset(NToPl,0,NToPl.Length);
  Utility.Memset(NToPlB,0,NToPlB.Length);
  Utility.Memset(NToPlC,0,NToPlC.Length);
  CorrHuff(ChSetB,NToPlB);
}

    /// <summary>
    /// Rebalances an adaptive Huffman table after its counters saturate,
    /// rewriting the per-entry counters and resetting NumToPlace.
    /// </summary>
    private void CorrHuff(ushort[] CharSet,byte[] NumToPlace)
{
  int I,J;
  // NOTE(review): only CharSet[0..31] is rewritten here (8 passes over the
  // same 32 entries, final pass leaves I==0). The C++ original advances a
  // pointer so that all 256 entries get low byte 7 - index/32 — confirm this
  // port against the upstream unpack15 source.
  for (I=7;I>=0;I--)
    for (J=0;J<32;J++)
      CharSet[J]=(ushort)((CharSet[J] & ~0xff) | I);
  // NOTE(review): clears NToPl.Length bytes of NumToPlace, mirroring the
  // C++ memset(NumToPlace,0,sizeof(NToPl)); equivalent only while all
  // NToPl* arrays share the same length.
  Utility.Memset(NumToPlace,0,NToPl.Length);
  for (I=6;I>=0;I--)
    NumToPlace[I]=(byte)((7-I)*32);
}

    /// <summary>
    /// Copies <paramref name="Length"/> bytes from <paramref name="Distance"/>
    /// bytes back in the circular window to the current position. Byte-by-byte
    /// so the source may overlap the destination (run-length style copies).
    /// </summary>
    private void CopyString15(uint Distance,uint Length)
{
  DestUnpSize-=Length;
  while (Length-- != 0)
  {
    Window[UnpPtr]=Window[(UnpPtr-Distance) & MaxWinMask];
    UnpPtr=(UnpPtr+1) & MaxWinMask;
  }
}
DecTab[I-1]:0))>>(int)(16-StartPos))+PosTab[StartPos]); +} + + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs new file mode 100644 index 0000000000..1a78f9afb2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs @@ -0,0 +1,399 @@ +#if !Rar2017_64bit +using nint = System.Int32; +using nuint = System.UInt32; +using size_t = System.UInt32; +#else +using nint = System.Int64; +using nuint = System.UInt64; +using size_t = System.UInt64; +#endif +using int64 = System.Int64; + +using System; +using static SharpCompress.Compressors.Rar.UnpackV2017.PackDef; +using static SharpCompress.Compressors.Rar.UnpackV2017.Unpack.Unpack20Local; + +namespace SharpCompress.Compressors.Rar.UnpackV2017 +{ + internal partial class Unpack + { + private void CopyString20(uint Length,uint Distance) +{ + LastDist=OldDist[OldDistPtr++ & 3]=Distance; + LastLength=Length; + DestUnpSize-=Length; + CopyString(Length,Distance); +} + + +internal static class Unpack20Local { + public static readonly byte[] LDecode={0,1,2,3,4,5,6,7,8,10,12,14,16,20,24,28,32,40,48,56,64,80,96,112,128,160,192,224}; + public static readonly byte[] LBits= {0,0,0,0,0,0,0,0,1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5}; + public static readonly uint[] DDecode={0,1,2,3,4,6,8,12,16,24,32,48,64,96,128,192,256,384,512,768,1024,1536,2048,3072,4096,6144,8192,12288,16384,24576,32768U,49152U,65536,98304,131072,196608,262144,327680,393216,458752,524288,589824,655360,720896,786432,851968,917504,983040}; + public static readonly byte[] DBits= {0,0,0,0,1,1,2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16}; + public static readonly byte[] SDDecode={0,4,8,16,32,64,128,192}; + public static readonly byte[] SDBits= {2,2,3, 4, 5, 
6, 6, 6}; +} + + private void Unpack20(bool Solid) +{ + uint Bits; + + if (Suspended) + UnpPtr=WrPtr; + else + { + UnpInitData(Solid); + if (!UnpReadBuf()) + return; + if ((!Solid || !TablesRead2) && !ReadTables20()) + return; + --DestUnpSize; + } + + while (DestUnpSize>=0) + { + UnpPtr&=MaxWinMask; + + if (Inp.InAddr>ReadTop-30) + if (!UnpReadBuf()) + break; + if (((WrPtr-UnpPtr) & MaxWinMask)<270 && WrPtr!=UnpPtr) + { + UnpWriteBuf20(); + if (Suspended) + return; + } + if (UnpAudioBlock) + { + uint AudioNumber=DecodeNumber(Inp,MD[UnpCurChannel]); + + if (AudioNumber==256) + { + if (!ReadTables20()) + break; + continue; + } + Window[UnpPtr++]=DecodeAudio((int)AudioNumber); + if (++UnpCurChannel==UnpChannels) + UnpCurChannel=0; + --DestUnpSize; + continue; + } + + uint Number=DecodeNumber(Inp,BlockTables.LD); + if (Number<256) + { + Window[UnpPtr++]=(byte)Number; + --DestUnpSize; + continue; + } + if (Number>269) + { + uint Length=(uint)(LDecode[Number-=270]+3); + if ((Bits=LBits[Number])>0) + { + Length+=Inp.getbits()>>(int)(16-Bits); + Inp.addbits(Bits); + } + + uint DistNumber=DecodeNumber(Inp,BlockTables.DD); + uint Distance=DDecode[DistNumber]+1; + if ((Bits=DBits[DistNumber])>0) + { + Distance+=Inp.getbits()>>(int)(16-Bits); + Inp.addbits(Bits); + } + + if (Distance>=0x2000) + { + Length++; + if (Distance>=0x40000L) + Length++; + } + + CopyString20(Length,Distance); + continue; + } + if (Number==269) + { + if (!ReadTables20()) + break; + continue; + } + if (Number==256) + { + CopyString20(LastLength,LastDist); + continue; + } + if (Number<261) + { + uint Distance=OldDist[(OldDistPtr-(Number-256)) & 3]; + uint LengthNumber=DecodeNumber(Inp,BlockTables.RD); + uint Length=(uint)(LDecode[LengthNumber]+2); + if ((Bits=LBits[LengthNumber])>0) + { + Length+=Inp.getbits()>>(int)(16-Bits); + Inp.addbits(Bits); + } + if (Distance>=0x101) + { + Length++; + if (Distance>=0x2000) + { + Length++; + if (Distance>=0x40000) + Length++; + } + } + CopyString20(Length,Distance); 
+ continue; + } + if (Number<270) + { + uint Distance=(uint)(SDDecode[Number-=261]+1); + if ((Bits=SDBits[Number])>0) + { + Distance+=Inp.getbits()>>(int)(16-Bits); + Inp.addbits(Bits); + } + CopyString20(2,Distance); + continue; + } + } + ReadLastTables(); + UnpWriteBuf20(); +} + + private void UnpWriteBuf20() +{ + if (UnpPtr!=WrPtr) + UnpSomeRead=true; + if (UnpPtrReadTop-25) + if (!UnpReadBuf()) + return false; + uint BitField=Inp.getbits(); + UnpAudioBlock=(BitField & 0x8000)!=0; + + if ((BitField & 0x4000) != 0) + Utility.Memset(UnpOldTable20,0,UnpOldTable20.Length); + Inp.addbits(2); + + uint TableSize; + if (UnpAudioBlock) + { + UnpChannels=((BitField>>12) & 3)+1; + if (UnpCurChannel>=UnpChannels) + UnpCurChannel=0; + Inp.addbits(2); + TableSize=MC20*UnpChannels; + } + else + TableSize=NC20+DC20+RC20; + + for (uint I=0;I> 12); + Inp.addbits(4); + } + MakeDecodeTables(BitLength,0,BlockTables.BD,BC20); + for (uint I=0;IReadTop-5) + if (!UnpReadBuf()) + return false; + uint Number=DecodeNumber(Inp,BlockTables.BD); + if (Number<16) + { + Table[I]=(byte)((Number+UnpOldTable20[I]) & 0xf); + I++; + } + else + if (Number==16) + { + uint N=(Inp.getbits() >> 14)+3; + Inp.addbits(2); + if (I==0) + return false; // We cannot have "repeat previous" code at the first position. 
+ else + while (N-- > 0 && I> 13)+3; + Inp.addbits(3); + } + else + { + N=(Inp.getbits() >> 9)+11; + Inp.addbits(7); + } + while (N-- > 0 && IReadTop) + return true; + if (UnpAudioBlock) + for (uint I=0;I=Inp.InAddr+5) + if (UnpAudioBlock) + { + if (DecodeNumber(Inp,MD[UnpCurChannel])==256) + ReadTables20(); + } + else + if (DecodeNumber(Inp,BlockTables.LD)==269) + ReadTables20(); +} + + private void UnpInitData20(bool Solid) +{ + if (!Solid) + { + TablesRead2=false; + UnpAudioBlock=false; + UnpChannelDelta=0; + UnpCurChannel=0; + UnpChannels=1; + + //memset(AudV,0,sizeof(AudV)); + AudV = new AudioVariables[4]; + Utility.Memset(UnpOldTable20, 0, UnpOldTable20.Length); + //memset(MD,0,sizeof(MD)); + MD = new DecodeTable[4]; + } +} + + private byte DecodeAudio(int Delta) +{ + AudioVariables V=AudV[UnpCurChannel]; + V.ByteCount++; + V.D4=V.D3; + V.D3=V.D2; + V.D2=V.LastDelta-V.D1; + V.D1=V.LastDelta; + int PCh=8*V.LastChar+V.K1*V.D1+V.K2*V.D2+V.K3*V.D3+V.K4*V.D4+V.K5*UnpChannelDelta; + PCh=(PCh>>3) & 0xFF; + + uint Ch=(uint)(PCh-Delta); + + int D=(sbyte)Delta; + // Left shift of negative value is undefined behavior in C++, + // so we cast it to unsigned to follow the standard. 
+ D=(int)((uint)D<<3); + + V.Dif[0]+=(uint)Math.Abs(D); + V.Dif[1]+=(uint)Math.Abs(D-V.D1); + V.Dif[2]+=(uint)Math.Abs(D+V.D1); + V.Dif[3]+=(uint)Math.Abs(D-V.D2); + V.Dif[4]+=(uint)Math.Abs(D+V.D2); + V.Dif[5]+=(uint)Math.Abs(D-V.D3); + V.Dif[6]+=(uint)Math.Abs(D+V.D3); + V.Dif[7]+=(uint)Math.Abs(D-V.D4); + V.Dif[8]+=(uint)Math.Abs(D+V.D4); + V.Dif[9]+=(uint)Math.Abs(D-UnpChannelDelta); + V.Dif[10]+=(uint)Math.Abs(D+UnpChannelDelta); + + UnpChannelDelta=V.LastDelta=(sbyte)(Ch-V.LastChar); + V.LastChar=(int)Ch; + + if ((V.ByteCount & 0x1F)==0) + { + uint MinDif=V.Dif[0],NumMinDif=0; + V.Dif[0]=0; + for (uint I=1;I=-16) + V.K1--; + break; + case 2: + if (V.K1<16) + V.K1++; + break; + case 3: + if (V.K2>=-16) + V.K2--; + break; + case 4: + if (V.K2<16) + V.K2++; + break; + case 5: + if (V.K3>=-16) + V.K3--; + break; + case 6: + if (V.K3<16) + V.K3++; + break; + case 7: + if (V.K4>=-16) + V.K4--; + break; + case 8: + if (V.K4<16) + V.K4++; + break; + case 9: + if (V.K5>=-16) + V.K5--; + break; + case 10: + if (V.K5<16) + V.K5++; + break; + } + } + return (byte)Ch; +} + + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs new file mode 100644 index 0000000000..7e77c7d40b --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs @@ -0,0 +1,800 @@ +#if !Rar2017_64bit +using nint = System.Int32; +using nuint = System.UInt32; +using size_t = System.UInt32; +#else +using nint = System.Int64; +using nuint = System.UInt64; +using size_t = System.UInt64; +#endif +using int64 = System.Int64; + +using System; +using static SharpCompress.Compressors.Rar.UnpackV2017.PackDef; +using static SharpCompress.Compressors.Rar.UnpackV2017.UnpackGlobal; +//using static SharpCompress.Compressors.Rar.UnpackV2017.Unpack.Unpack30Local; +/* +namespace SharpCompress.Compressors.Rar.UnpackV2017 +{ + internal 
partial class Unpack + { + +#if !RarV2017_RAR5ONLY +// We use it instead of direct PPM.DecodeChar call to be sure that +// we reset PPM structures in case of corrupt data. It is important, +// because these structures can be invalid after PPM.DecodeChar returned -1. +int SafePPMDecodeChar() +{ + int Ch=PPM.DecodeChar(); + if (Ch==-1) // Corrupt PPM data found. + { + PPM.CleanUp(); // Reset possibly corrupt PPM data structures. + UnpBlockType=BLOCK_LZ; // Set faster and more fail proof LZ mode. + } + return(Ch); +} + +internal static class Unpack30Local { + public static readonly byte[] LDecode={0,1,2,3,4,5,6,7,8,10,12,14,16,20,24,28,32,40,48,56,64,80,96,112,128,160,192,224}; + public static readonly byte[] LBits= {0,0,0,0,0,0,0,0,1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5}; + public static readonly int[] DDecode = new int[DC]; + public static readonly byte[] DBits = new byte[DC]; + public static readonly int[] DBitLengthCounts= {4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,14,0,12}; + public static readonly byte[] SDDecode={0,4,8,16,32,64,128,192}; + public static readonly byte[] SDBits= {2,2,3, 4, 5, 6, 6, 6}; +} +void Unpack29(bool Solid) +{ + uint Bits; + + if (DDecode[1]==0) + { + int Dist=0,BitLength=0,Slot=0; + for (int I=0;IReadBorder) + { + if (!UnpReadBuf30()) + break; + } + if (((WrPtr-UnpPtr) & MaxWinMask)<260 && WrPtr!=UnpPtr) + { + UnpWriteBuf30(); + if (WrittenFileSize>DestUnpSize) + return; + if (Suspended) + { + FileExtracted=false; + return; + } + } + if (UnpBlockType==BLOCK_PPM) + { + // Here speed is critical, so we do not use SafePPMDecodeChar, + // because sometimes even the inline function can introduce + // some additional penalty. + int Ch=PPM.DecodeChar(); + if (Ch==-1) // Corrupt PPM data found. + { + PPM.CleanUp(); // Reset possibly corrupt PPM data structures. + UnpBlockType=BLOCK_LZ; // Set faster and more fail proof LZ mode. 
+ break; + } + if (Ch==PPMEscChar) + { + int NextCh=SafePPMDecodeChar(); + if (NextCh==0) // End of PPM encoding. + { + if (!ReadTables30()) + break; + continue; + } + if (NextCh==-1) // Corrupt PPM data found. + break; + if (NextCh==2) // End of file in PPM mode. + break; + if (NextCh==3) // Read VM code. + { + if (!ReadVMCodePPM()) + break; + continue; + } + if (NextCh==4) // LZ inside of PPM. + { + uint Distance=0,Length; + bool Failed=false; + for (int I=0;I<4 && !Failed;I++) + { + int _Ch=SafePPMDecodeChar(); + if (_Ch==-1) + Failed=true; + else + if (I==3) + Length=(byte)_Ch; + else + Distance=(Distance<<8)+(byte)_Ch; + } + if (Failed) + break; + + CopyString(Length+32,Distance+2); + continue; + } + if (NextCh==5) // One byte distance match (RLE) inside of PPM. + { + int Length=SafePPMDecodeChar(); + if (Length==-1) + break; + CopyString((uint)(Length+4),1); + continue; + } + // If we are here, NextCh must be 1, what means that current byte + // is equal to our 'escape' byte, so we just store it to Window. 
+ } + Window[UnpPtr++]=(byte)Ch; + continue; + } + + uint Number=DecodeNumber(Inp,BlockTables.LD); + if (Number<256) + { + Window[UnpPtr++]=(byte)Number; + continue; + } + if (Number>=271) + { + uint Length=(uint)(LDecode[Number-=271]+3); + if ((Bits=LBits[Number])>0) + { + Length+=Inp.getbits()>>(int)(16-Bits); + Inp.addbits(Bits); + } + + uint DistNumber=DecodeNumber(Inp,BlockTables.DD); + uint Distance=(uint)(DDecode[DistNumber]+1); + if ((Bits=DBits[DistNumber])>0) + { + if (DistNumber>9) + { + if (Bits>4) + { + Distance+=((Inp.getbits()>>(int)(20-Bits))<<4); + Inp.addbits(Bits-4); + } + if (LowDistRepCount>0) + { + LowDistRepCount--; + Distance+=(uint)PrevLowDist; + } + else + { + uint LowDist=DecodeNumber(Inp,BlockTables.LDD); + if (LowDist==16) + { + LowDistRepCount=(int)(LOW_DIST_REP_COUNT-1); + Distance+=(uint)PrevLowDist; + } + else + { + Distance+=LowDist; + PrevLowDist=(int)LowDist; + } + } + } + else + { + Distance+=Inp.getbits()>>(int)(16-Bits); + Inp.addbits(Bits); + } + } + + if (Distance>=0x2000) + { + Length++; + if (Distance>=0x40000) + Length++; + } + + InsertOldDist(Distance); + LastLength=Length; + CopyString(Length,Distance); + continue; + } + if (Number==256) + { + if (!ReadEndOfBlock()) + break; + continue; + } + if (Number==257) + { + if (!ReadVMCode()) + break; + continue; + } + if (Number==258) + { + if (LastLength!=0) + CopyString(LastLength,OldDist[0]); + continue; + } + if (Number<263) + { + uint DistNum=Number-259; + uint Distance=OldDist[DistNum]; + for (uint I=DistNum;I>0;I--) + OldDist[I]=OldDist[I-1]; + OldDist[0]=Distance; + + uint LengthNumber=DecodeNumber(Inp,BlockTables.RD); + int Length=LDecode[LengthNumber]+2; + if ((Bits=LBits[LengthNumber])>0) + { + Length+=(int)(Inp.getbits()>>(int)(16-Bits)); + Inp.addbits(Bits); + } + LastLength=(uint)Length; + CopyString((uint)Length,Distance); + continue; + } + if (Number<272) + { + uint Distance=(uint)(SDDecode[Number-=263]+1); + if ((Bits=SDBits[Number])>0) + { + 
Distance+=Inp.getbits()>>(int)(16-Bits); + Inp.addbits(Bits); + } + InsertOldDist(Distance); + LastLength=2; + CopyString(2,Distance); + continue; + } + } + UnpWriteBuf30(); +} + + +// Return 'false' to quit unpacking the current file or 'true' to continue. +bool ReadEndOfBlock() +{ + uint BitField=Inp.getbits(); + bool NewTable,NewFile=false; + + // "1" - no new file, new table just here. + // "00" - new file, no new table. + // "01" - new file, new table (in beginning of next file). + + if ((BitField & 0x8000)!=0) + { + NewTable=true; + Inp.addbits(1); + } + else + { + NewFile=true; + NewTable=(BitField & 0x4000)!=0; + Inp.addbits(2); + } + TablesRead3=!NewTable; + + // Quit immediately if "new file" flag is set. If "new table" flag + // is present, we'll read the table in beginning of next file + // based on 'TablesRead3' 'false' value. + if (NewFile) + return false; + return ReadTables30(); // Quit only if we failed to read tables. +} + + +bool ReadVMCode() +{ + // Entire VM code is guaranteed to fully present in block defined + // by current Huffman table. Compressor checks that VM code does not cross + // Huffman block boundaries. 
+ uint FirstByte=Inp.getbits()>>8; + Inp.addbits(8); + uint Length=(FirstByte & 7)+1; + if (Length==7) + { + Length=(Inp.getbits()>>8)+7; + Inp.addbits(8); + } + else + if (Length==8) + { + Length=Inp.getbits(); + Inp.addbits(16); + } + if (Length==0) + return false; + Array VMCode(Length); + for (uint I=0;I=ReadTop-1 && !UnpReadBuf30() && I>8; + Inp.addbits(8); + } + return AddVMCode(FirstByte,&VMCode[0],Length); +} + + +bool ReadVMCodePPM() +{ + uint FirstByte=(uint)SafePPMDecodeChar(); + if ((int)FirstByte==-1) + return false; + uint Length=(FirstByte & 7)+1; + if (Length==7) + { + int B1=SafePPMDecodeChar(); + if (B1==-1) + return false; + Length=B1+7; + } + else + if (Length==8) + { + int B1=SafePPMDecodeChar(); + if (B1==-1) + return false; + int B2=SafePPMDecodeChar(); + if (B2==-1) + return false; + Length=B1*256+B2; + } + if (Length==0) + return false; + Array VMCode(Length); + for (uint I=0;IFilters30.Count || FiltPos>OldFilterLengths.Count) + return false; + LastFilter=(int)FiltPos; + bool NewFilter=(FiltPos==Filters30.Count); + + UnpackFilter30 StackFilter=new UnpackFilter30(); // New filter for PrgStack. + + UnpackFilter30 Filter; + if (NewFilter) // New filter code, never used before since VM reset. + { + if (FiltPos>MAX3_UNPACK_FILTERS) + { + // Too many different filters, corrupt archive. + //delete StackFilter; + return false; + } + + Filters30.Add(1); + Filters30[Filters30.Count-1]=Filter=new UnpackFilter30(); + StackFilter.ParentFilter=(uint)(Filters30.Count-1); + + // Reserve one item to store the data block length of our new filter + // entry. We'll set it to real block length below, after reading it. + // But we need to initialize it now, because when processing corrupt + // data, we can access this item even before we set it to real value. + OldFilterLengths.Add(0); + } + else // Filter was used in the past. 
+ { + Filter=Filters30[(int)FiltPos]; + StackFilter.ParentFilter=FiltPos; + } + + int EmptyCount=0; + for (int I=0;I0) + PrgStack[I]=null; + } + if (EmptyCount==0) + { + if (PrgStack.Count>MAX3_UNPACK_FILTERS) + { + //delete StackFilter; + return false; + } + PrgStack.Add(1); + EmptyCount=1; + } + size_t StackPos=(uint)(this.PrgStack.Count-EmptyCount); + PrgStack[(int)StackPos]=StackFilter; + + uint BlockStart=RarVM.ReadData(VMCodeInp); + if ((FirstByte & 0x40)!=0) + BlockStart+=258; + StackFilter.BlockStart=(uint)((BlockStart+UnpPtr)&MaxWinMask); + if ((FirstByte & 0x20)!=0) + { + StackFilter.BlockLength=RarVM.ReadData(VMCodeInp); + + // Store the last data block length for current filter. + OldFilterLengths[(int)FiltPos]=(int)StackFilter.BlockLength; + } + else + { + // Set the data block size to same value as the previous block size + // for same filter. It is possible for corrupt data to access a new + // and not filled yet item of OldFilterLengths array here. This is why + // we set new OldFilterLengths items to zero above. + StackFilter.BlockLength=FiltPos>9; + VMCodeInp.faddbits(7); + for (int I=0;I<7;I++) + if ((InitMask & (1<=0x10000 || VMCodeSize==0) + return false; + Array VMCode(VMCodeSize); + for (uint I=0;I>8; + VMCodeInp.faddbits(8); + } + VM.Prepare(&VMCode[0],VMCodeSize,&Filter->Prg); + } + StackFilter.Prg.Type=Filter.Prg.Type; + + return true; +} + + +bool UnpReadBuf30() +{ + int DataSize=ReadTop-Inp.InAddr; // Data left to process. + if (DataSize<0) + return false; + if (Inp.InAddr>BitInput.MAX_SIZE/2) + { + // If we already processed more than half of buffer, let's move + // remaining data into beginning to free more space for new data + // and ensure that calling function does not cross the buffer border + // even if we did not read anything here. Also it ensures that read size + // is not less than CRYPT_BLOCK_SIZE, so we can align it without risk + // to make it zero. 
+ if (DataSize>0) + //x memmove(Inp.InBuf,Inp.InBuf+Inp.InAddr,DataSize); + Array.Copy(Inp.InBuf,Inp.InAddr,Inp.InBuf,0,DataSize); + Inp.InAddr=0; + ReadTop=DataSize; + } + else + DataSize=ReadTop; + int ReadCode=UnpIO_UnpRead(Inp.InBuf,DataSize,BitInput.MAX_SIZE-DataSize); + if (ReadCode>0) + ReadTop+=ReadCode; + ReadBorder=ReadTop-30; + return ReadCode!=-1; +} + + +void UnpWriteBuf30() +{ + uint WrittenBorder=(uint)WrPtr; + uint WriteSize=(uint)((UnpPtr-WrittenBorder)&MaxWinMask); + for (int I=0;IParentFilter]->Prg; + VM_PreparedProgram *Prg=&flt->Prg; + + ExecuteCode(Prg); + + byte[] FilteredData=Prg.FilteredData; + uint FilteredDataSize=Prg.FilteredDataSize; + + delete PrgStack[I]; + PrgStack[I]=null; + while (I+1Prg; + VM_PreparedProgram *NextPrg=&NextFilter->Prg; + + ExecuteCode(NextPrg); + + FilteredData=NextPrg.FilteredData; + FilteredDataSize=NextPrg.FilteredDataSize; + I++; + delete PrgStack[I]; + PrgStack[I]=null; + } + UnpIO_UnpWrite(FilteredData,0,FilteredDataSize); + UnpSomeRead=true; + WrittenFileSize+=FilteredDataSize; + WrittenBorder=BlockEnd; + WriteSize=(uint)((UnpPtr-WrittenBorder)&MaxWinMask); + } + else + { + // Current filter intersects the window write border, so we adjust + // the window border to process this filter next time, not now. 
+ for (size_t J=I;JInitR[6]=(uint)WrittenFileSize; + VM.Execute(Prg); +} + + +bool ReadTables30() +{ + byte[] BitLength = new byte[BC]; + byte[] Table = new byte[HUFF_TABLE_SIZE30]; + if (Inp.InAddr>ReadTop-25) + if (!UnpReadBuf30()) + return(false); + Inp.faddbits((uint)((8-Inp.InBit)&7)); + uint BitField=Inp.fgetbits(); + if ((BitField & 0x8000) != 0) + { + UnpBlockType=BLOCK_PPM; + return(PPM.DecodeInit(this,PPMEscChar)); + } + UnpBlockType=BLOCK_LZ; + + PrevLowDist=0; + LowDistRepCount=0; + + if ((BitField & 0x4000) == 0) + Utility.Memset(UnpOldTable,0,UnpOldTable.Length); + Inp.faddbits(2); + + for (uint I=0;I> 12); + Inp.faddbits(4); + if (Length==15) + { + uint ZeroCount=(byte)(Inp.fgetbits() >> 12); + Inp.faddbits(4); + if (ZeroCount==0) + BitLength[I]=15; + else + { + ZeroCount+=2; + while (ZeroCount-- > 0 && IReadTop-5) + if (!UnpReadBuf30()) + return(false); + uint Number=DecodeNumber(Inp,BlockTables.BD); + if (Number<16) + { + Table[I]=(byte)((Number+this.UnpOldTable[I]) & 0xf); + I++; + } + else + if (Number<18) + { + uint N; + if (Number==16) + { + N=(Inp.fgetbits() >> 13)+3; + Inp.faddbits(3); + } + else + { + N=(Inp.fgetbits() >> 9)+11; + Inp.faddbits(7); + } + if (I==0) + return false; // We cannot have "repeat previous" code at the first position. 
+ else + while (N-- > 0 && I> 13)+3; + Inp.faddbits(3); + } + else + { + N=(Inp.fgetbits() >> 9)+11; + Inp.faddbits(7); + } + while (N-- > 0 && IReadTop) + return false; + MakeDecodeTables(Table,0,BlockTables.LD,NC30); + MakeDecodeTables(Table,(int)NC30,BlockTables.DD,DC30); + MakeDecodeTables(Table,(int)(NC30+DC30),BlockTables.LDD,LDC30); + MakeDecodeTables(Table,(int)(NC30+DC30+LDC30),BlockTables.RD,RC30); + //x memcpy(UnpOldTable,Table,sizeof(UnpOldTable)); + Array.Copy(Table,0,UnpOldTable,0,UnpOldTable.Length); + return true; +} + +#endif + +void UnpInitData30(bool Solid) +{ + if (!Solid) + { + TablesRead3=false; + Utility.Memset(UnpOldTable, 0, UnpOldTable.Length); + PPMEscChar=2; + UnpBlockType=BLOCK_LZ; + } + InitFilters30(Solid); +} + + +void InitFilters30(bool Solid) +{ + if (!Solid) + { + //OldFilterLengths.SoftReset(); + OldFilterLengths.Clear(); + LastFilter=0; + + //for (size_t I=0;I=ReadBorder) + { + bool FileDone=false; + + // We use 'while', because for empty block containing only Huffman table, + // we'll be on the block border once again just after reading the table. 
+ while (Inp.InAddr>BlockHeader.BlockStart+BlockHeader.BlockSize-1 || + Inp.InAddr==BlockHeader.BlockStart+BlockHeader.BlockSize-1 && + Inp.InBit>=BlockHeader.BlockBitSize) + { + if (BlockHeader.LastBlockInFile) + { + FileDone=true; + break; + } + if (!ReadBlockHeader(Inp,ref BlockHeader) || !ReadTables(Inp, ref BlockHeader, ref BlockTables)) + return; + } + if (FileDone || !UnpReadBuf()) + break; + } + + if (((WriteBorder-UnpPtr) & MaxWinMask)DestUnpSize) + return; + if (Suspended) + { + FileExtracted=false; + return; + } + } + + uint MainSlot=DecodeNumber(Inp,BlockTables.LD); + if (MainSlot<256) + { + if (Fragmented) + FragWindow[UnpPtr++]=(byte)MainSlot; + else + Window[UnpPtr++]=(byte)MainSlot; + continue; + } + if (MainSlot>=262) + { + uint Length=SlotToLength(Inp,MainSlot-262); + + uint DBits,Distance=1,DistSlot=DecodeNumber(Inp,BlockTables.DD); + if (DistSlot<4) + { + DBits=0; + Distance+=DistSlot; + } + else + { + DBits=DistSlot/2 - 1; + Distance+=(2 | (DistSlot & 1)) << (int)DBits; + } + + if (DBits>0) + { + if (DBits>=4) + { + if (DBits>4) + { + Distance+=((Inp.getbits32()>>(int)(36-DBits))<<4); + Inp.addbits(DBits-4); + } + uint LowDist=DecodeNumber(Inp,BlockTables.LDD); + Distance+=LowDist; + } + else + { + Distance+=Inp.getbits32()>>(int)(32-DBits); + Inp.addbits(DBits); + } + } + + if (Distance>0x100) + { + Length++; + if (Distance>0x2000) + { + Length++; + if (Distance>0x40000) + Length++; + } + } + + InsertOldDist(Distance); + LastLength=Length; + if (Fragmented) + FragWindow.CopyString(Length,Distance,ref UnpPtr,MaxWinMask); + else + CopyString(Length,Distance); + continue; + } + if (MainSlot==256) + { + UnpackFilter Filter = new UnpackFilter(); + if (!ReadFilter(Inp,Filter) || !AddFilter(Filter)) + break; + continue; + } + if (MainSlot==257) + { + if (LastLength!=0) + if (Fragmented) + FragWindow.CopyString(LastLength,OldDist[0],ref UnpPtr,MaxWinMask); + else + CopyString(LastLength,OldDist[0]); + continue; + } + if (MainSlot<262) + { + uint 
DistNum=MainSlot-258; + uint Distance=OldDist[DistNum]; + for (uint I=DistNum;I>0;I--) + OldDist[I]=OldDist[I-1]; + OldDist[0]=Distance; + + uint LengthSlot=DecodeNumber(Inp,BlockTables.RD); + uint Length=SlotToLength(Inp,LengthSlot); + LastLength=Length; + if (Fragmented) + FragWindow.CopyString(Length,Distance,ref UnpPtr,MaxWinMask); + else + CopyString(Length,Distance); + continue; + } + } + UnpWriteBuf(); +} + + private uint ReadFilterData(BitInput Inp) +{ + uint ByteCount=(Inp.fgetbits()>>14)+1; + Inp.addbits(2); + + uint Data=0; + for (uint I=0;I>8)<<(int)(I*8); + Inp.addbits(8); + } + return Data; +} + + private bool ReadFilter(BitInput Inp,UnpackFilter Filter) +{ + if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-16) + if (!UnpReadBuf()) + return false; + + Filter.BlockStart=ReadFilterData(Inp); + Filter.BlockLength=ReadFilterData(Inp); + if (Filter.BlockLength>MAX_FILTER_BLOCK_SIZE) + Filter.BlockLength=0; + + Filter.Type=(byte)(Inp.fgetbits()>>13); + Inp.faddbits(3); + + if (Filter.Type==FILTER_DELTA) + { + Filter.Channels=(byte)((Inp.fgetbits()>>11)+1); + Inp.faddbits(5); + } + + return true; +} + + private bool AddFilter(UnpackFilter Filter) +{ + if (Filters.Count>=MAX_UNPACK_FILTERS) + { + UnpWriteBuf(); // Write data, apply and flush filters. + if (Filters.Count>=MAX_UNPACK_FILTERS) + InitFilters(); // Still too many filters, prevent excessive memory use. + } + + // If distance to filter start is that large that due to circular dictionary + // mode now it points to old not written yet data, then we set 'NextWindow' + // flag and process this filter only after processing that older data. + Filter.NextWindow=WrPtr!=UnpPtr && ((WrPtr-UnpPtr)&MaxWinMask)<=Filter.BlockStart; + + Filter.BlockStart=(uint)((Filter.BlockStart+UnpPtr)&MaxWinMask); + Filters.Add(Filter); + return true; +} + + private bool UnpReadBuf() +{ + int DataSize=ReadTop-Inp.InAddr; // Data left to process. 
+ if (DataSize<0) + return false; + BlockHeader.BlockSize-=Inp.InAddr-BlockHeader.BlockStart; + if (Inp.InAddr>MAX_SIZE/2) + { + // If we already processed more than half of buffer, let's move + // remaining data into beginning to free more space for new data + // and ensure that calling function does not cross the buffer border + // even if we did not read anything here. Also it ensures that read size + // is not less than CRYPT_BLOCK_SIZE, so we can align it without risk + // to make it zero. + if (DataSize>0) + //x memmove(Inp.InBuf,Inp.InBuf+Inp.InAddr,DataSize); + Buffer.BlockCopy(Inp.InBuf, Inp.InAddr, Inp.InBuf, 0, DataSize); + Inp.InAddr=0; + ReadTop=DataSize; + } + else + DataSize=ReadTop; + int ReadCode=0; + if (MAX_SIZE!=DataSize) + ReadCode=UnpIO_UnpRead(Inp.InBuf,DataSize,MAX_SIZE-DataSize); + if (ReadCode>0) // Can be also -1. + ReadTop+=ReadCode; + ReadBorder=ReadTop-30; + BlockHeader.BlockStart=Inp.InAddr; + if (BlockHeader.BlockSize!=-1) // '-1' means not defined yet. + { + // We may need to quit from main extraction loop and read new block header + // and trees earlier than data in input buffer ends. + ReadBorder=Math.Min(ReadBorder,BlockHeader.BlockStart+BlockHeader.BlockSize-1); + } + return ReadCode!=-1; +} + + private void UnpWriteBuf() +{ + size_t WrittenBorder=WrPtr; + size_t FullWriteSize=(UnpPtr-WrittenBorder)&MaxWinMask; + size_t WriteSizeLeft=FullWriteSize; + bool NotAllFiltersProcessed=false; + //for (size_t I=0;I int + for (int I=0;I0) // We set it to 0 also for invalid filters. 
+ { + uint BlockEnd=(BlockStart+BlockLength)&MaxWinMask; + + //x FilterSrcMemory.Alloc(BlockLength); + FilterSrcMemory = EnsureCapacity(FilterSrcMemory, checked((int)BlockLength)); + byte[] Mem= FilterSrcMemory; + if (BlockStart int + for (int J=I;J int + int EmptyCount=0; + // sharpcompress: size_t -> int + for (int I=0;I0) + Filters[I-EmptyCount]=Filters[I]; + if (Filters[I].Type==FILTER_NONE) + EmptyCount++; + } + if (EmptyCount>0) + //Filters.Alloc(Filters.Count-EmptyCount); + Filters.RemoveRange(Filters.Count-EmptyCount, EmptyCount); + + if (!NotAllFiltersProcessed) // Only if all filters are processed. + { + // Write data left after last filter. + UnpWriteArea(WrittenBorder,UnpPtr); + WrPtr=UnpPtr; + } + + // We prefer to write data in blocks not exceeding UNPACK_MAX_WRITE + // instead of potentially huge MaxWinSize blocks. It also allows us + // to keep the size of Filters array reasonable. + WriteBorder=(UnpPtr+Math.Min(MaxWinSize,UNPACK_MAX_WRITE))&MaxWinMask; + + // Choose the nearest among WriteBorder and WrPtr actual written border. + // If border is equal to UnpPtr, it means that we have MaxWinSize data ahead. + if (WriteBorder==UnpPtr || + WrPtr!=UnpPtr && ((WrPtr-UnpPtr)&MaxWinMask)<((WriteBorder-UnpPtr)&MaxWinMask)) + WriteBorder=WrPtr; +} + + private byte[] ApplyFilter(byte[] __d,uint DataSize,UnpackFilter Flt) +{ + int Data = 0; + byte[] SrcData=__d; + switch(Flt.Type) + { + case FILTER_E8: + case FILTER_E8E9: + { + uint FileOffset=(uint)WrittenFileSize; + + const uint FileSize=0x1000000; + byte CmpByte2=Flt.Type==FILTER_E8E9 ? (byte)0xe9 : (byte)0xe8; + // DataSize is unsigned, so we use "CurPos+4" and not "DataSize-4" + // to avoid overflow for DataSize<4. 
+ for (uint CurPos=0;CurPos+4=0 + RawPut4(Addr+FileSize,__d,Data); + } + else + if (((Addr-FileSize) & 0x80000000)!=0) // Addr>8); + __d[D+2]=(byte)(Offset>>16); + } + } + } + return SrcData; + case FILTER_DELTA: + { + // Unlike RAR3, we do not need to reject excessive channel + // values here, since RAR5 uses only 5 bits to store channel. + uint Channels=Flt.Channels,SrcPos=0; + + //x FilterDstMemory.Alloc(DataSize); + FilterDstMemory = EnsureCapacity(FilterDstMemory, checked((int)DataSize)); + + byte[] DstData=FilterDstMemory; + + // Bytes from same channels are grouped to continual data blocks, + // so we need to place them back to their interleaving positions. + for (uint CurChannel=0;CurChannel0) + { + size_t BlockSize=FragWindow.GetBlockSize(StartPtr,SizeToWrite); + //UnpWriteData(&FragWindow[StartPtr],BlockSize); + FragWindow.GetBuffer(StartPtr, out var __buffer, out var __offset); + UnpWriteData(__buffer, __offset, BlockSize); + SizeToWrite-=BlockSize; + StartPtr=(StartPtr+BlockSize) & MaxWinMask; + } + } + else + if (EndPtr=DestUnpSize) + return; + size_t WriteSize=Size; + int64 LeftToWrite=DestUnpSize-WrittenFileSize; + if ((int64)WriteSize>LeftToWrite) + WriteSize=(size_t)LeftToWrite; + UnpIO_UnpWrite(Data, offset, WriteSize); + WrittenFileSize+=Size; +} + + private void UnpInitData50(bool Solid) +{ + if (!Solid) + TablesRead5=false; +} + + private bool ReadBlockHeader(BitInput Inp,ref UnpackBlockHeader Header) +{ + Header.HeaderSize=0; + + if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-7) + if (!UnpReadBuf()) + return false; + Inp.faddbits((uint)((8-Inp.InBit)&7)); + + byte BlockFlags=(byte)(Inp.fgetbits()>>8); + Inp.faddbits(8); + uint ByteCount=(uint)(((BlockFlags>>3)&3)+1); // Block size byte count. 
+ + if (ByteCount==4) + return false; + + Header.HeaderSize=(int)(2+ByteCount); + + Header.BlockBitSize=(BlockFlags&7)+1; + + byte SavedCheckSum=(byte)(Inp.fgetbits()>>8); + Inp.faddbits(8); + + int BlockSize=0; + for (uint I=0;I>8)<<(int)(I*8)); + Inp.addbits(8); + } + + Header.BlockSize=BlockSize; + byte CheckSum=(byte)(0x5a^BlockFlags^BlockSize^(BlockSize>>8)^(BlockSize>>16)); + if (CheckSum!=SavedCheckSum) + return false; + + Header.BlockStart=Inp.InAddr; + ReadBorder=Math.Min(ReadBorder,Header.BlockStart+Header.BlockSize-1); + + Header.LastBlockInFile=(BlockFlags & 0x40)!=0; + Header.TablePresent=(BlockFlags & 0x80)!=0; + return true; +} + + private bool ReadTables(BitInput Inp,ref UnpackBlockHeader Header, ref UnpackBlockTables Tables) +{ + if (!Header.TablePresent) + return true; + + if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-25) + if (!UnpReadBuf()) + return false; + + byte[] BitLength = new byte[BC]; + for (uint I=0;I> 12); + Inp.faddbits(4); + if (Length==15) + { + uint ZeroCount=(byte)(Inp.fgetbits() >> 12); + Inp.faddbits(4); + if (ZeroCount==0) + BitLength[I]=15; + else + { + ZeroCount+=2; + while (ZeroCount-- > 0 && IReadTop-5) + if (!UnpReadBuf()) + return false; + uint Number=DecodeNumber(Inp,Tables.BD); + if (Number<16) + { + Table[I]=(byte)Number; + I++; + } + else + if (Number<18) + { + uint N; + if (Number==16) + { + N=(Inp.fgetbits() >> 13)+3; + Inp.faddbits(3); + } + else + { + N=(Inp.fgetbits() >> 9)+11; + Inp.faddbits(7); + } + if (I==0) + { + // We cannot have "repeat previous" code at the first position. + // Multiple such codes would shift Inp position without changing I, + // which can lead to reading beyond of Inp boundary in mutithreading + // mode, where Inp.ExternalBuffer disables bounds check and we just + // reserve a lot of buffer space to not need such check normally. 
+ return false; + } + else + while (N-- > 0 && I> 13)+3; + Inp.faddbits(3); + } + else + { + N=(Inp.fgetbits() >> 9)+11; + Inp.faddbits(7); + } + while (N-- > 0 && IReadTop) + return false; + MakeDecodeTables(Table, 0, Tables.LD,NC); + MakeDecodeTables(Table, (int)NC,Tables.DD,DC); + MakeDecodeTables(Table, (int)(NC+DC),Tables.LDD,LDC); + MakeDecodeTables(Table, (int)(NC+DC+LDC),Tables.RD,RC); + return true; +} + + private void InitFilters() +{ + //Filters.SoftReset(); + Filters.Clear(); +} + + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs new file mode 100644 index 0000000000..5b76ef4c5c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs @@ -0,0 +1,377 @@ +#if !Rar2017_64bit +using nint = System.Int32; +using nuint = System.UInt32; +using size_t = System.UInt32; +#else +using nint = System.Int64; +using nuint = System.UInt64; +using size_t = System.UInt64; +#endif + +using System; +using SharpCompress.Common; +using static SharpCompress.Compressors.Rar.UnpackV2017.UnpackGlobal; +using static SharpCompress.Compressors.Rar.UnpackV2017.PackDef; + +namespace SharpCompress.Compressors.Rar.UnpackV2017 +{ + internal sealed partial class Unpack : BitInput + { + +public Unpack(/* ComprDataIO *DataIO */) +//:Inp(true),VMCodeInp(true) +: base(true) +{ + _UnpackCtor(); + + //UnpIO=DataIO; + Window=null; + Fragmented=false; + Suspended=false; + UnpAllBuf=false; + UnpSomeRead=false; +#if RarV2017_RAR_SMP + MaxUserThreads=1; + UnpThreadPool=CreateThreadPool(); + ReadBufMT=null; + UnpThreadData=null; +#endif + MaxWinSize=0; + MaxWinMask=0; + + // Perform initialization, which should be done only once for all files. + // It prevents crash if first DoUnpack call is later made with wrong + // (true) 'Solid' value. 
+ UnpInitData(false); +#if !RarV2017_SFX_MODULE + // RAR 1.5 decompression initialization + UnpInitData15(false); + InitHuff(); +#endif +} + +// later: may need Dispose() if we support thread pool +//Unpack::~Unpack() +//{ +// InitFilters30(false); +// +// if (Window!=null) +// free(Window); +//#if RarV2017_RAR_SMP +// DestroyThreadPool(UnpThreadPool); +// delete[] ReadBufMT; +// delete[] UnpThreadData; +//#endif +//} + + private void Init(size_t WinSize,bool Solid) +{ + // If 32-bit RAR unpacks an archive with 4 GB dictionary, the window size + // will be 0 because of size_t overflow. Let's issue the memory error. + if (WinSize==0) + //ErrHandler.MemoryError(); + throw new InvalidFormatException("invalid window size (possibly due to a rar file with a 4GB being unpacked on a 32-bit platform)"); + + // Minimum window size must be at least twice more than maximum possible + // size of filter block, which is 0x10000 in RAR now. If window size is + // smaller, we can have a block with never cleared flt->NextWindow flag + // in UnpWriteBuf(). Minimum window size 0x20000 would be enough, but let's + // use 0x40000 for extra safety and possible filter area size expansion. + const size_t MinAllocSize=0x40000; + if (WinSize>16)>0x10000) // Window size must not exceed 4 GB. + return; + + // Archiving code guarantees that window size does not grow in the same + // solid stream. So if we are here, we are either creating a new window + // or increasing the size of non-solid window. So we could safely reject + // current window data without copying them to a new window, though being + // extra cautious, we still handle the solid window grow case below. + bool Grow=Solid && (Window!=null || Fragmented); + + // We do not handle growth for existing fragmented window. + if (Grow && Fragmented) + //throw std::bad_alloc(); + throw new InvalidFormatException("Grow && Fragmented"); + + byte[] NewWindow=Fragmented ? 
null : new byte[WinSize]; + + if (NewWindow==null) + if (Grow || WinSize<0x1000000) + { + // We do not support growth for new fragmented window. + // Also exclude RAR4 and small dictionaries. + //throw std::bad_alloc(); + throw new InvalidFormatException("Grow || WinSize<0x1000000"); + } + else + { + if (Window!=null) // If allocated by preceding files. + { + //free(Window); + Window=null; + } + FragWindow.Init(WinSize); + Fragmented=true; + } + + if (!Fragmented) + { + // Clean the window to generate the same output when unpacking corrupt + // RAR files, which may access unused areas of sliding dictionary. + // sharpcompress: don't need this, freshly allocated above + //memset(NewWindow,0,WinSize); + + + // If Window is not NULL, it means that window size has grown. + // In solid streams we need to copy data to a new window in such case. + // RAR archiving code does not allow it in solid streams now, + // but let's implement it anyway just in case we'll change it sometimes. + if (Grow) + for (size_t I=1;I<=MaxWinSize;I++) + NewWindow[(UnpPtr-I)&(WinSize-1)]=Window[(UnpPtr-I)&(MaxWinSize-1)]; + + //if (Window!=null) + // free(Window); + Window=NewWindow; + } + + MaxWinSize=WinSize; + MaxWinMask=MaxWinSize-1; +} + + private void DoUnpack(uint Method,bool Solid) +{ + // Methods <50 will crash in Fragmented mode when accessing NULL Window. + // They cannot be called in such mode now, but we check it below anyway + // just for extra safety. + switch(Method) + { +#if !RarV2017_SFX_MODULE + case 15: // rar 1.5 compression + if (!Fragmented) + Unpack15(Solid); + break; + case 20: // rar 2.x compression + case 26: // files larger than 2GB + if (!Fragmented) + Unpack20(Solid); + break; +#endif +#if !RarV2017_RAR5ONLY + case 29: // rar 3.x compression + if (!Fragmented) + throw new NotImplementedException(); + break; +#endif + case 50: // RAR 5.0 compression algorithm. 
+#if RarV2017_RAR_SMP + if (MaxUserThreads>1) + { +// We do not use the multithreaded unpack routine to repack RAR archives +// in 'suspended' mode, because unlike the single threaded code it can +// write more than one dictionary for same loop pass. So we would need +// larger buffers of unknown size. Also we do not support multithreading +// in fragmented window mode. + if (!Fragmented) + { + Unpack5MT(Solid); + break; + } + } +#endif + Unpack5(Solid); + break; +#if !Rar2017_NOSTRICT + default: throw new InvalidFormatException("unknown compression method " + Method); +#endif + } +} + + private void UnpInitData(bool Solid) +{ + if (!Solid) + { + Utility.Memset(OldDist, 0, OldDist.Length); + OldDistPtr=0; + LastDist=LastLength=0; +// memset(Window,0,MaxWinSize); + //memset(&BlockTables,0,sizeof(BlockTables)); + BlockTables = new UnpackBlockTables(); + // sharpcompress: no default ctor for struct + BlockTables.Init(); + UnpPtr=WrPtr=0; + WriteBorder=Math.Min(MaxWinSize,UNPACK_MAX_WRITE)&MaxWinMask; + } + // Filters never share several solid files, so we can safely reset them + // even in solid archive. + InitFilters(); + + Inp.InitBitInput(); + WrittenFileSize=0; + ReadTop=0; + ReadBorder=0; + + //memset(&BlockHeader,0,sizeof(BlockHeader)); + BlockHeader = new UnpackBlockHeader(); + BlockHeader.BlockSize=-1; // '-1' means not defined yet. +#if !RarV2017_SFX_MODULE + UnpInitData20(Solid); +#endif + //UnpInitData30(Solid); + UnpInitData50(Solid); +} + + +// LengthTable contains the length in bits for every element of alphabet. +// Dec is the structure to decode Huffman code/ +// Size is size of length table and DecodeNum field in Dec structure, + private void MakeDecodeTables(byte[] LengthTable, int offset, DecodeTable Dec,uint Size) +{ + // Size of alphabet and DecodePos array. + Dec.MaxNum=Size; + + // Calculate how many entries for every bit length in LengthTable we have. 
+ uint[] LengthCount = new uint[16]; + //memset(LengthCount,0,sizeof(LengthCount)); + for (size_t I=0;IDecodeNum,0,Size*sizeof(*Dec->DecodeNum)); + Utility.FillFast(Dec.DecodeNum, 0); + + // Initialize not really used entry for zero length code. + Dec.DecodePos[0]=0; + + // Start code for bit length 1 is 0. + Dec.DecodeLen[0]=0; + + // Right aligned upper limit code for current bit length. + uint UpperLimit=0; + + for (int I=1;I<16;I++) + { + // Adjust the upper limit code. + UpperLimit+=LengthCount[I]; + + // Left aligned upper limit code. + uint LeftAligned=UpperLimit<<(16-I); + + // Prepare the upper limit code for next bit length. + UpperLimit*=2; + + // Store the left aligned upper limit code. + Dec.DecodeLen[I]=(uint)LeftAligned; + + // Every item of this array contains the sum of all preceding items. + // So it contains the start position in code list for every bit length. + Dec.DecodePos[I]=Dec.DecodePos[I-1]+LengthCount[I-1]; + } + + // Prepare the copy of DecodePos. We'll modify this copy below, + // so we cannot use the original DecodePos. + uint[] CopyDecodePos = new uint[Dec.DecodePos.Length]; + //memcpy(CopyDecodePos,Dec->DecodePos,sizeof(CopyDecodePos)); + Array.Copy(Dec.DecodePos, 0, CopyDecodePos, 0, CopyDecodePos.Length); + + // For every bit length in the bit length table and so for every item + // of alphabet. + for (uint I=0;I=Dec.DecodeLen[CurBitLength]) + CurBitLength++; + + // Translation of right aligned bit string to bit length. + Dec.QuickLen[Code]=CurBitLength; + + // Prepare the table for quick translation of position in code list + // to position in alphabet. + + // Calculate the distance from the start code for current bit length. + uint Dist=BitField-Dec.DecodeLen[CurBitLength-1]; + + // Right align the distance. + Dist>>=(16-CurBitLength); + + // Now we can calculate the position in the code list. 
It is the sum + // of first position for current bit length and right aligned distance + // between our bit field and start code for current bit length. + uint Pos; + if (CurBitLength 0) + { + Window[UnpPtr++] = Window[SrcPtr++]; + } + +// byte *Src=Window+SrcPtr; +// byte *Dest=Window+UnpPtr; +// UnpPtr+=Length; +// +//#if FAST_MEMCPY +// if (Distance=8) +// { +// Dest[0]=Src[0]; +// Dest[1]=Src[1]; +// Dest[2]=Src[2]; +// Dest[3]=Src[3]; +// Dest[4]=Src[4]; +// Dest[5]=Src[5]; +// Dest[6]=Src[6]; +// Dest[7]=Src[7]; +// +// Src+=8; +// Dest+=8; +// Length-=8; +// } +//#if FAST_MEMCPY +// else +// while (Length>=8) +// { +// // In theory we still could overlap here. +// // Supposing Distance == MaxWinSize - 1 we have memcpy(Src, Src + 1, 8). +// // But for real RAR archives Distance <= MaxWinSize - MAX_LZ_MATCH +// // always, so overlap here is impossible. +// +// // This memcpy expanded inline by MSVC. We could also use uint64 +// // assignment, which seems to provide about the same speed. +// memcpy(Dest,Src,8); +// +// Src+=8; +// Dest+=8; +// Length-=8; +// } +//#endif +// +// // Unroll the loop for 0 - 7 bytes left. Note that we use nested "if"s. +// if (Length>0) { Dest[0]=Src[0]; +// if (Length>1) { Dest[1]=Src[1]; +// if (Length>2) { Dest[2]=Src[2]; +// if (Length>3) { Dest[3]=Src[3]; +// if (Length>4) { Dest[4]=Src[4]; +// if (Length>5) { Dest[5]=Src[5]; +// if (Length>6) { Dest[6]=Src[6]; } } } } } } } // Close all nested "if"s. + } + else + while (Length-- > 0) // Slow copying with all possible precautions. + { + Window[UnpPtr]=Window[SrcPtr++ & MaxWinMask]; + // We need to have masked UnpPtr after quit from loop, so it must not + // be replaced with 'Window[UnpPtr++ & MaxWinMask]' + UnpPtr=(UnpPtr+1) & MaxWinMask; + } +} + + private uint DecodeNumber(BitInput Inp,DecodeTable Dec) +{ + // Left aligned 15 bit length raw bit field. 
+ uint BitField=Inp.getbits() & 0xfffe; + + if (BitField>(int)(16-Dec.QuickBits); + Inp.addbits(Dec.QuickLen[Code]); + return Dec.QuickNum[Code]; + } + + // Detect the real bit length for current code. + uint Bits=15; + for (uint I=Dec.QuickBits+1;I<15;I++) + if (BitField>=(int)(16-Bits); + + // Now we can calculate the position in the code list. It is the sum + // of first position for current bit length and right aligned distance + // between our bit field and start code for current bit length. + uint Pos=Dec.DecodePos[Bits]+Dist; + + // Out of bounds safety check required for damaged archives. + if (Pos>=Dec.MaxNum) + Pos=0; + + // Convert the position in the code list to position in alphabet + // and return it. + return Dec.DecodeNum[Pos]; +} + + private uint SlotToLength(BitInput Inp,uint Slot) +{ + uint LBits,Length=2; + if (Slot<8) + { + LBits=0; + Length+=Slot; + } + else + { + LBits=Slot/4-1; + Length+=(4 | (Slot & 3)) << (int)LBits; + } + + if (LBits>0) + { + Length+=Inp.getbits()>>(int)(16-LBits); + Inp.addbits(LBits); + } + return Length; +} + + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/notes.txt b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/notes.txt new file mode 100644 index 0000000000..d56277c2f4 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/notes.txt @@ -0,0 +1,50 @@ +#if !Rar2017_64bit +using nint = System.Int32; +using nuint = System.UInt32; +using size_t = System.UInt32; +#else +using nint = System.Int64; +using nuint = System.UInt64; +using size_t = System.UInt64; +#endif +using int64 = System.Int64; + + +notes on C->C# primitive mappings: + +nint := native integer +nuint := native unsigned integer + +type 32b 64b mapping CAREFUL! 
+char 8 bit 8 bit short +unsigned char 8 bit 8 bit ushort +short int 16 bit 16 bit short + +short int 16 bit 16 bit short +unsigned short int 16 bit 16 bit ushort +int 32 bit 32 bit int +unsigned int 32 bit 32 bit uint +long int 32 bit 64 bit nint *** +unsigned long int 32 bit 64 bit nuint *** +long long int 64 bit 64 bit long +unsigned long long int 64 bit 64 bit ulong +size_t 32 bit 64 bit nuint + +The size_t type is the unsigned integer type that is the result of the sizeof operator (and the offsetof operator), +so it is guaranteed to be big enough to contain the size of the biggest object your system can handle (e.g., a static array of 8Gb). +[size_t] -> ulong (x64) +[size_t] -> uint (x86) + +size_t is an unsigned data type defined by several C/C++ standards, e.g. the C99 ISO/IEC 9899 standard, that is defined +in stddef.h.1 It can be further imported by inclusion of stdlib.h as this file internally sub includes stddef.h. +This type is used to represent the size of an object. Library functions that take or return sizes expect them to be of type or +have the return type of size_t. Further, the most frequently used compiler-based operator sizeof should evaluate to a constant +value that is compatible with size_t. + + +20171218 +urggh, this allows things like new int[int.MaxValue] but NOT new byte[uint.MaxValue] +currently arrays are limited to being indexed by an int hence int.MaxValue entries. weak. 
+To get arrays > 2GB on x64 we need to configure + +https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs new file mode 100644 index 0000000000..8586c98baa --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs @@ -0,0 +1,441 @@ +#if !Rar2017_64bit +using nint = System.Int32; +using nuint = System.UInt32; +using size_t = System.UInt32; +#else +using nint = System.Int64; +using nuint = System.UInt64; +using size_t = System.UInt64; +#endif +using int64 = System.Int64; + +using System.Collections.Generic; +using static SharpCompress.Compressors.Rar.UnpackV2017.PackDef; +using static SharpCompress.Compressors.Rar.UnpackV2017.UnpackGlobal; + +// TODO: REMOVE THIS... WIP +#pragma warning disable 169 +#pragma warning disable 414 + +namespace SharpCompress.Compressors.Rar.UnpackV2017 +{ + internal static class UnpackGlobal + { + + +// Maximum allowed number of compressed bits processed in quick mode. +public const int MAX_QUICK_DECODE_BITS =10; + +// Maximum number of filters per entire data block. Must be at least +// twice more than MAX_PACK_FILTERS to store filters from two data blocks. +public const int MAX_UNPACK_FILTERS =8192; + +// Maximum number of filters per entire data block for RAR3 unpack. +// Must be at least twice more than v3_MAX_PACK_FILTERS to store filters +// from two data blocks. +public const int MAX3_UNPACK_FILTERS =8192; + +// Limit maximum number of channels in RAR3 delta filter to some reasonable +// value to prevent too slow processing of corrupt archives with invalid +// channels number. Must be equal or larger than v3_MAX_FILTER_CHANNELS. +// No need to provide it for RAR5, which uses only 5 bits to store channels. 
+private const int MAX3_UNPACK_CHANNELS =1024; + +// Maximum size of single filter block. We restrict it to limit memory +// allocation. Must be equal or larger than MAX_ANALYZE_SIZE. +public const int MAX_FILTER_BLOCK_SIZE =0x400000; + +// Write data in 4 MB or smaller blocks. Must not exceed PACK_MAX_WRITE, +// so we keep number of buffered filter in unpacker reasonable. +public const int UNPACK_MAX_WRITE =0x400000; + } + +// Decode compressed bit fields to alphabet numbers. + internal sealed class DecodeTable +{ + // Real size of DecodeNum table. + public uint MaxNum; + + // Left aligned start and upper limit codes defining code space + // ranges for bit lengths. DecodeLen[BitLength-1] defines the start of + // range for bit length and DecodeLen[BitLength] defines next code + // after the end of range or in other words the upper limit code + // for specified bit length. + public readonly uint[] DecodeLen = new uint[16]; + + // Every item of this array contains the sum of all preceding items. + // So it contains the start position in code list for every bit length. + public readonly uint[] DecodePos = new uint[16]; + + // Number of compressed bits processed in quick mode. + // Must not exceed MAX_QUICK_DECODE_BITS. + public uint QuickBits; + + // Translates compressed bits (up to QuickBits length) + // to bit length in quick mode. + public readonly byte[] QuickLen = new byte[1< Filters = new List(); + + private readonly uint[] OldDist = new uint[4]; + private uint OldDistPtr; + private uint LastLength; + + // LastDist is necessary only for RAR2 and older with circular OldDist + // array. In RAR3 last distance is always stored in OldDist[0]. + private uint LastDist; + + private size_t UnpPtr,WrPtr; + + // Top border of read packed data. + private int ReadTop; + + // Border to call UnpReadBuf. We use it instead of (ReadTop-C) + // for optimization reasons. Ensures that we have C bytes in buffer + // unless we are at the end of file. 
+ private int ReadBorder; + + private UnpackBlockHeader BlockHeader; + private UnpackBlockTables BlockTables; + + private size_t WriteBorder; + + private byte[] Window; + + private readonly FragmentedWindow FragWindow = new FragmentedWindow(); + private bool Fragmented; + + private int64 DestUnpSize; + + //bool Suspended; + private bool UnpAllBuf; + private bool UnpSomeRead; + private int64 WrittenFileSize; + private bool FileExtracted; + + +/***************************** Unpack v 1.5 *********************************/ + //void Unpack15(bool Solid); + //void ShortLZ(); + //void LongLZ(); + //void HuffDecode(); + //void GetFlagsBuf(); + //void UnpInitData15(int Solid); + //void InitHuff(); + //void CorrHuff(ushort *CharSet,byte *NumToPlace); + //void CopyString15(uint Distance,uint Length); + //uint DecodeNum(uint Num,uint StartPos,uint *DecTab,uint *PosTab); + + private readonly ushort[] ChSet = new ushort[256],ChSetA = new ushort[256],ChSetB = new ushort[256],ChSetC = new ushort[256]; + private readonly byte[] NToPl = new byte[256],NToPlB = new byte[256],NToPlC = new byte[256]; + private uint FlagBuf,AvrPlc,AvrPlcB,AvrLn1,AvrLn2,AvrLn3; + private int Buf60,NumHuf,StMode,LCount,FlagsCnt; + + private uint Nhfb,Nlzb,MaxDist3; +/***************************** Unpack v 1.5 *********************************/ + +/***************************** Unpack v 2.0 *********************************/ + //void Unpack20(bool Solid); + + private DecodeTable[] MD = new DecodeTable[4]; // Decode multimedia data, up to 4 channels. 
+ + private readonly byte[] UnpOldTable20 = new byte[MC20*4]; + private bool UnpAudioBlock; + private uint UnpChannels,UnpCurChannel; + + private int UnpChannelDelta; + //void CopyString20(uint Length,uint Distance); + //bool ReadTables20(); + //void UnpWriteBuf20(); + //void UnpInitData20(int Solid); + //void ReadLastTables(); + //byte DecodeAudio(int Delta); + private AudioVariables[] AudV = new AudioVariables[4]; +/***************************** Unpack v 2.0 *********************************/ + +/***************************** Unpack v 3.0 *********************************/ + public const int BLOCK_LZ = 0; + public const int BLOCK_PPM = 1; + + //void UnpInitData30(bool Solid); + //void Unpack29(bool Solid); + //void InitFilters30(bool Solid); + //bool ReadEndOfBlock(); + //bool ReadVMCode(); + //bool ReadVMCodePPM(); + //bool AddVMCode(uint FirstByte,byte *Code,int CodeSize); + //int SafePPMDecodeChar(); + //bool ReadTables30(); + //bool UnpReadBuf30(); + //void UnpWriteBuf30(); + //void ExecuteCode(VM_PreparedProgram *Prg); + + private int PrevLowDist,LowDistRepCount; + +/*#if !RarV2017_RAR5ONLY + ModelPPM PPM; +#endif*/ + private int PPMEscChar; + + private readonly byte [] UnpOldTable = new byte[HUFF_TABLE_SIZE30]; + private int UnpBlockType; + + // If we already read decoding tables for Unpack v2,v3,v5. + // We should not use a single variable for all algorithm versions, + // because we can have a corrupt archive with one algorithm file + // followed by another algorithm file with "solid" flag and we do not + // want to reuse tables from one algorithm in another. + private bool TablesRead2,TablesRead3,TablesRead5; + + // Virtual machine to execute filters code. +/*#if !RarV2017_RAR5ONLY + RarVM VM; +#endif*/ + + // Buffer to read VM filters code. We moved it here from AddVMCode + // function to reduce time spent in BitInput constructor. + private readonly BitInput VMCodeInp = new BitInput(true); + + // Filters code, one entry per filter. 
+ private readonly List Filters30 = new List(); + + // Filters stack, several entrances of same filter are possible. + private readonly List PrgStack = new List(); + + // Lengths of preceding data blocks, one length of one last block + // for every filter. Used to reduce the size required to write + // the data block length if lengths are repeating. + private readonly List OldFilterLengths = new List(); + + private int LastFilter; +/***************************** Unpack v 3.0 *********************************/ + + //Unpack(ComprDataIO *DataIO); + //~Unpack(); + //void Init(size_t WinSize,bool Solid); + //void DoUnpack(uint Method,bool Solid); + private bool IsFileExtracted() {return(FileExtracted);} + private void SetDestSize(int64 DestSize) {DestUnpSize=DestSize;FileExtracted=false;} + private void SetSuspended(bool Suspended) {this.Suspended=Suspended;} + +#if RarV2017_RAR_SMP + // More than 8 threads are unlikely to provide a noticeable gain + // for unpacking, but would use the additional memory. 
+ void SetThreads(uint Threads) {MaxUserThreads=Min(Threads,8);} + + void UnpackDecode(UnpackThreadData &D); +#endif + + private size_t MaxWinSize; + private size_t MaxWinMask; + + private uint GetChar() + { + if (Inp.InAddr>MAX_SIZE-30) + UnpReadBuf(); + return(Inp.InBuf[Inp.InAddr++]); + } + + + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/BitInput.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/BitInput.cs new file mode 100644 index 0000000000..825759123c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/BitInput.cs @@ -0,0 +1,86 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal class BitInput + { + /// the max size of the input + internal const int MAX_SIZE = 0x8000; + + public int inAddr; + public int inBit; + +// TODO: rename var + public int InAddr { get { return inAddr; } set { inAddr = value; } } + public int InBit { get { return inBit; } set { inBit = value; } } + public bool ExternalBuffer; + + + /// + internal BitInput() + { + InBuf = new byte[MAX_SIZE]; + } + + internal byte[] InBuf { get; } + + internal void InitBitInput() + { + inAddr = 0; + inBit = 0; + } + + internal void faddbits(uint bits) { + // TODO uint + AddBits((int)bits); + } + + /// + /// also named faddbits + /// + /// + internal void AddBits(int bits) + { + bits += inBit; + inAddr += (bits >> 3); + inBit = bits & 7; + } + + internal uint fgetbits() { + // TODO uint + return (uint)GetBits(); + } + + internal uint getbits() { + // TODO uint + return (uint)GetBits(); + } + + /// + /// (also named fgetbits) + /// + /// + /// the bits (unsigned short) + /// + internal int GetBits() + { + // int BitField=0; + // BitField|=(int)(inBuf[inAddr] << 16)&0xFF0000; + // BitField|=(int)(inBuf[inAddr+1] << 8)&0xff00; + // BitField|=(int)(inBuf[inAddr+2])&0xFF; + // BitField >>>= (8-inBit); + // return (BitField & 0xffff); + return ((Utility.URShift((((InBuf[inAddr] & 0xff) << 16) + + ((InBuf[inAddr + 1] & 0xff) << 8) + + 
((InBuf[inAddr + 2] & 0xff))), (8 - inBit))) & 0xffff); + } + + /// Indicates an Overfow + /// how many bytes to inc + /// + /// true if an Oververflow would occur + /// + internal bool Overflow(int IncPtr) + { + return (inAddr + IncPtr >= MAX_SIZE); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/RarVM.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/RarVM.cs new file mode 100644 index 0000000000..8580f55b50 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/RarVM.cs @@ -0,0 +1,1452 @@ +using System; +using System.Collections.Generic; +using SharpCompress.Converters; + +namespace SharpCompress.Compressors.Rar.VM +{ + internal class RarVM : BitInput + { + //private void InitBlock() + //{ + // Mem.set_Renamed(offset + 0, Byte.valueOf((sbyte) (value_Renamed & 0xff))); + // Mem.set_Renamed(offset + 1, Byte.valueOf((sbyte) ((Utility.URShift(value_Renamed, 8)) & 0xff))); + // Mem.set_Renamed(offset + 2, Byte.valueOf((sbyte) ((Utility.URShift(value_Renamed, 16)) & 0xff))); + // Mem.set_Renamed(offset + 3, Byte.valueOf((sbyte) ((Utility.URShift(value_Renamed, 24)) & 0xff))); + + //} + internal byte[] Mem { get; private set; } + + public const int VM_MEMSIZE = 0x40000; + + //UPGRADE_NOTE: Final was removed from the declaration of 'VM_MEMMASK '. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + public static readonly int VM_MEMMASK = (VM_MEMSIZE - 1); + + public const int VM_GLOBALMEMADDR = 0x3C000; + + public const int VM_GLOBALMEMSIZE = 0x2000; + + public const int VM_FIXEDGLOBALSIZE = 64; + + private const int regCount = 8; + + private const long UINT_MASK = 0xffffFFFF; //((long)2*(long)Integer.MAX_VALUE); + + private readonly int[] R = new int[regCount]; + + private VMFlags flags; + + private int maxOpCount = 25000000; + + private int codeSize; + + private int IP; + + internal RarVM() + { + //InitBlock(); + Mem = null; + } + + internal void init() + { + if (Mem == null) + { + Mem = new byte[VM_MEMSIZE + 4]; + } + } + + private bool IsVMMem(byte[] mem) + { + return Mem == mem; + } + + private int GetValue(bool byteMode, byte[] mem, int offset) + { + if (byteMode) + { + if (IsVMMem(mem)) + { + return (mem[offset]); + } + return (mem[offset] & 0xff); + } + if (IsVMMem(mem)) + { + return DataConverter.LittleEndian.GetInt32(mem, offset); + } + return DataConverter.BigEndian.GetInt32(mem, offset); + } + + private void SetValue(bool byteMode, byte[] mem, int offset, int value) + { + if (byteMode) + { + if (IsVMMem(mem)) + { + mem[offset] = (byte)value; + } + else + { + mem[offset] = (byte)((mem[offset] & 0x00) | (byte)(value & 0xff)); + } + } + else + { + if (IsVMMem(mem)) + { + DataConverter.LittleEndian.PutBytes(mem, offset, value); + } + else + { + DataConverter.BigEndian.PutBytes(mem, offset, value); + } + } + + // #define SET_VALUE(ByteMode,Addr,Value) SetValue(ByteMode,(uint + // *)Addr,Value) + } + + internal void SetLowEndianValue(List mem, int offset, int value) + { + mem[offset + 0] = (byte)(value & 0xff); + mem[offset + 1] = (byte)(Utility.URShift(value, 8) & 0xff); + mem[offset + 2] = (byte)(Utility.URShift(value, 16) & 0xff); + mem[offset + 3] = (byte)(Utility.URShift(value, 24) & 0xff); + } + + private int GetOperand(VMPreparedOperand 
cmdOp) + { + int ret = 0; + if (cmdOp.Type == VMOpType.VM_OPREGMEM) + { + int pos = (cmdOp.Offset + cmdOp.Base) & VM_MEMMASK; + ret = DataConverter.LittleEndian.GetInt32(Mem, pos); + } + else + { + int pos = cmdOp.Offset; + ret = DataConverter.LittleEndian.GetInt32(Mem, pos); + } + return ret; + } + + public void execute(VMPreparedProgram prg) + { + for (int i = 0; i < prg.InitR.Length; i++) + + // memcpy(R,Prg->InitR,sizeof(Prg->InitR)); + { + R[i] = prg.InitR[i]; + } + + long globalSize = Math.Min(prg.GlobalData.Count, VM_GLOBALMEMSIZE) & 0xffFFffFF; + if (globalSize != 0) + { + for (int i = 0; i < globalSize; i++) + + // memcpy(Mem+VM_GLOBALMEMADDR,&Prg->GlobalData[0],GlobalSize); + { + Mem[VM_GLOBALMEMADDR + i] = prg.GlobalData[i]; + } + } + long staticSize = Math.Min(prg.StaticData.Count, VM_GLOBALMEMSIZE - globalSize) & 0xffFFffFF; + if (staticSize != 0) + { + for (int i = 0; i < staticSize; i++) + + // memcpy(Mem+VM_GLOBALMEMADDR+GlobalSize,&Prg->StaticData[0],StaticSize); + { + Mem[VM_GLOBALMEMADDR + (int)globalSize + i] = prg.StaticData[i]; + } + } + R[7] = VM_MEMSIZE; + flags = 0; + + //UPGRADE_NOTE: There is an untranslated Statement. Please refer to original code. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1153'" + List preparedCode = prg.AltCommands.Count != 0 + ? 
prg + .AltCommands + : prg.Commands; + + if (!ExecuteCode(preparedCode, prg.CommandCount)) + { + preparedCode[0].OpCode = VMCommands.VM_RET; + } + int newBlockPos = GetValue(false, Mem, VM_GLOBALMEMADDR + 0x20) & VM_MEMMASK; + int newBlockSize = GetValue(false, Mem, VM_GLOBALMEMADDR + 0x1c) & VM_MEMMASK; + if ((newBlockPos + newBlockSize) >= VM_MEMSIZE) + { + newBlockPos = 0; + newBlockSize = 0; + } + + prg.FilteredDataOffset = newBlockPos; + prg.FilteredDataSize = newBlockSize; + + prg.GlobalData.Clear(); + + int dataSize = Math.Min(GetValue(false, Mem, VM_GLOBALMEMADDR + 0x30), VM_GLOBALMEMSIZE - VM_FIXEDGLOBALSIZE); + if (dataSize != 0) + { + //prg.GlobalData.Clear(); + // ->GlobalData.Add(dataSize+VM_FIXEDGLOBALSIZE); + prg.GlobalData.SetSize(dataSize + VM_FIXEDGLOBALSIZE); + for (int i = 0; i < dataSize + VM_FIXEDGLOBALSIZE; i++) + + // memcpy(&Prg->GlobalData[0],&Mem[VM_GLOBALMEMADDR],DataSize+VM_FIXEDGLOBALSIZE); + { + prg.GlobalData[i] = Mem[VM_GLOBALMEMADDR + i]; + } + } + } + + private bool setIP(int ip) + { + if ((ip) >= codeSize) + { + return (true); + } + + if (--maxOpCount <= 0) + { + return (false); + } + + IP = ip; + return true; + } + + private bool ExecuteCode(List preparedCode, + int cmdCount) + { + maxOpCount = 25000000; + codeSize = cmdCount; + IP = 0; + + while (true) + { + VMPreparedCommand cmd = preparedCode[IP]; + int op1 = GetOperand(cmd.Op1); + int op2 = GetOperand(cmd.Op2); + switch (cmd.OpCode) + { + case VMCommands.VM_MOV: + SetValue(cmd.IsByteMode, Mem, op1, GetValue(cmd.IsByteMode, Mem, op2)); + + // SET_VALUE(Cmd->ByteMode,Op1,GET_VALUE(Cmd->ByteMode,Op2)); + break; + + case VMCommands.VM_MOVB: + SetValue(true, Mem, op1, GetValue(true, Mem, op2)); + break; + + case VMCommands.VM_MOVD: + SetValue(false, Mem, op1, GetValue(false, Mem, op2)); + break; + + case VMCommands.VM_CMP: + { + VMFlags value1 = (VMFlags)GetValue(cmd.IsByteMode, Mem, op1); + VMFlags result = value1 - GetValue(cmd.IsByteMode, Mem, op2); + + if (result == 0) + { + 
flags = VMFlags.VM_FZ; + } + else + { + flags = (VMFlags)((result > value1) ? 1 : 0 | (int)(result & VMFlags.VM_FS)); + } + } + break; + + case VMCommands.VM_CMPB: + { + VMFlags value1 = (VMFlags)GetValue(true, Mem, op1); + VMFlags result = value1 - GetValue(true, Mem, op2); + if (result == 0) + { + flags = VMFlags.VM_FZ; + } + else + { + flags = (VMFlags)((result > value1) ? 1 : 0 | (int)(result & VMFlags.VM_FS)); + } + } + break; + + case VMCommands.VM_CMPD: + { + VMFlags value1 = (VMFlags)GetValue(false, Mem, op1); + VMFlags result = value1 - GetValue(false, Mem, op2); + if (result == 0) + { + flags = VMFlags.VM_FZ; + } + else + { + flags = (VMFlags)((result > value1) ? 1 : 0 | (int)(result & VMFlags.VM_FS)); + } + } + break; + + case VMCommands.VM_ADD: + { + int value1 = GetValue(cmd.IsByteMode, Mem, op1); + int result = + (int) + (((value1 + (long)GetValue(cmd.IsByteMode, Mem, op2))) & + unchecked((int)0xffffffff)); + if (cmd.IsByteMode) + { + result &= 0xff; + flags = + (VMFlags) + ((result < value1) + ? 1 + : 0 | + (result == 0 + ? (int)VMFlags.VM_FZ + : (((result & 0x80) != 0) ? (int)VMFlags.VM_FS : 0))); + + // Flags=(Result value1) ? 
1 : 0 | (result & (int)VMFlags.VM_FS))); + SetValue(cmd.IsByteMode, Mem, op1, result); // (Cmd->ByteMode,Op1,Result); + } + break; + + case VMCommands.VM_SUBB: + SetValue(true, Mem, op1, + (int) + (GetValue(true, Mem, op1) & 0xFFffFFff - GetValue(true, Mem, op2) & + unchecked((int)0xFFffFFff))); + break; + + case VMCommands.VM_SUBD: + SetValue(false, Mem, op1, + (int) + (GetValue(false, Mem, op1) & 0xFFffFFff - GetValue(false, Mem, op2) & + unchecked((int)0xFFffFFff))); + break; + + case VMCommands.VM_JZ: + if ((flags & VMFlags.VM_FZ) != 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_JNZ: + if ((flags & VMFlags.VM_FZ) == 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_INC: + { + int result = (int)(GetValue(cmd.IsByteMode, Mem, op1) & 0xFFffFFffL + 1L); + if (cmd.IsByteMode) + { + result &= 0xff; + } + + SetValue(cmd.IsByteMode, Mem, op1, result); + flags = (VMFlags)(result == 0 ? (int)VMFlags.VM_FZ : result & (int)VMFlags.VM_FS); + } + break; + + case VMCommands.VM_INCB: + SetValue(true, Mem, op1, (int)(GetValue(true, Mem, op1) & 0xFFffFFffL + 1L)); + break; + + case VMCommands.VM_INCD: + SetValue(false, Mem, op1, (int)(GetValue(false, Mem, op1) & 0xFFffFFffL + 1L)); + break; + + case VMCommands.VM_DEC: + { + int result = (int)(GetValue(cmd.IsByteMode, Mem, op1) & 0xFFffFFff - 1); + SetValue(cmd.IsByteMode, Mem, op1, result); + flags = (VMFlags)(result == 0 ? (int)VMFlags.VM_FZ : result & (int)VMFlags.VM_FS); + } + break; + + case VMCommands.VM_DECB: + SetValue(true, Mem, op1, (int)(GetValue(true, Mem, op1) & 0xFFffFFff - 1)); + break; + + case VMCommands.VM_DECD: + SetValue(false, Mem, op1, (int)(GetValue(false, Mem, op1) & 0xFFffFFff - 1)); + break; + + case VMCommands.VM_JMP: + setIP(GetValue(false, Mem, op1)); + continue; + + case VMCommands.VM_XOR: + { + int result = GetValue(cmd.IsByteMode, Mem, op1) ^ GetValue(cmd.IsByteMode, Mem, op2); + flags = (VMFlags)(result == 0 ? 
(int)VMFlags.VM_FZ : result & (int)VMFlags.VM_FS); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_AND: + { + int result = GetValue(cmd.IsByteMode, Mem, op1) & GetValue(cmd.IsByteMode, Mem, op2); + flags = (VMFlags)(result == 0 ? (int)VMFlags.VM_FZ : result & (int)VMFlags.VM_FS); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_OR: + { + int result = GetValue(cmd.IsByteMode, Mem, op1) | GetValue(cmd.IsByteMode, Mem, op2); + flags = (VMFlags)(result == 0 ? (int)VMFlags.VM_FZ : result & (int)VMFlags.VM_FS); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_TEST: + { + int result = GetValue(cmd.IsByteMode, Mem, op1) & GetValue(cmd.IsByteMode, Mem, op2); + flags = (VMFlags)(result == 0 ? (int)VMFlags.VM_FZ : result & (int)VMFlags.VM_FS); + } + break; + + case VMCommands.VM_JS: + if ((flags & VMFlags.VM_FS) != 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_JNS: + if ((flags & VMFlags.VM_FS) == 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_JB: + if ((flags & VMFlags.VM_FC) != 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_JBE: + if ((flags & (VMFlags.VM_FC | VMFlags.VM_FZ)) != 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_JA: + if ((flags & (VMFlags.VM_FC | VMFlags.VM_FZ)) == 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_JAE: + if ((flags & VMFlags.VM_FC) == 0) + { + setIP(GetValue(false, Mem, op1)); + continue; + } + break; + + case VMCommands.VM_PUSH: + R[7] -= 4; + SetValue(false, Mem, R[7] & VM_MEMMASK, GetValue(false, Mem, op1)); + break; + + case VMCommands.VM_POP: + SetValue(false, Mem, op1, GetValue(false, Mem, R[7] & VM_MEMMASK)); + R[7] += 4; + break; + + case VMCommands.VM_CALL: + R[7] -= 4; + SetValue(false, Mem, R[7] & VM_MEMMASK, IP + 1); + 
setIP(GetValue(false, Mem, op1)); + continue; + + case VMCommands.VM_NOT: + SetValue(cmd.IsByteMode, Mem, op1, ~GetValue(cmd.IsByteMode, Mem, op1)); + break; + + case VMCommands.VM_SHL: + { + int value1 = GetValue(cmd.IsByteMode, Mem, op1); + int value2 = GetValue(cmd.IsByteMode, Mem, op2); + int result = value1 << value2; + flags = + (VMFlags) + ((result == 0 ? (int)VMFlags.VM_FZ : (result & (int)VMFlags.VM_FS)) | + (((value1 << (value2 - 1)) & unchecked((int)0x80000000)) != 0 + ? (int)VMFlags.VM_FC + : 0)); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_SHR: + { + int value1 = GetValue(cmd.IsByteMode, Mem, op1); + int value2 = GetValue(cmd.IsByteMode, Mem, op2); + int result = Utility.URShift(value1, value2); + flags = + (VMFlags) + ((result == 0 ? (int)VMFlags.VM_FZ : (result & (int)VMFlags.VM_FS)) | + ((Utility.URShift(value1, (value2 - 1))) & (int)VMFlags.VM_FC)); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_SAR: + { + int value1 = GetValue(cmd.IsByteMode, Mem, op1); + int value2 = GetValue(cmd.IsByteMode, Mem, op2); + int result = value1 >> value2; + flags = + (VMFlags) + ((result == 0 ? (int)VMFlags.VM_FZ : (result & (int)VMFlags.VM_FS)) | + ((value1 >> (value2 - 1)) & (int)VMFlags.VM_FC)); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_NEG: + { + int result = -GetValue(cmd.IsByteMode, Mem, op1); + flags = + (VMFlags) + (result == 0 + ? 
(int)VMFlags.VM_FZ + : (int)VMFlags.VM_FC | (result & (int)VMFlags.VM_FS)); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_NEGB: + SetValue(true, Mem, op1, -GetValue(true, Mem, op1)); + break; + + case VMCommands.VM_NEGD: + SetValue(false, Mem, op1, -GetValue(false, Mem, op1)); + break; + + case VMCommands.VM_PUSHA: + { + for (int i = 0, SP = R[7] - 4; i < regCount; i++, SP -= 4) + { + SetValue(false, Mem, SP & VM_MEMMASK, R[i]); + } + R[7] -= regCount * 4; + } + break; + + case VMCommands.VM_POPA: + { + for (int i = 0, SP = R[7]; i < regCount; i++, SP += 4) + { + R[7 - i] = GetValue(false, Mem, SP & VM_MEMMASK); + } + } + break; + + case VMCommands.VM_PUSHF: + R[7] -= 4; + SetValue(false, Mem, R[7] & VM_MEMMASK, (int)flags); + break; + + case VMCommands.VM_POPF: + flags = (VMFlags)GetValue(false, Mem, R[7] & VM_MEMMASK); + R[7] += 4; + break; + + case VMCommands.VM_MOVZX: + SetValue(false, Mem, op1, GetValue(true, Mem, op2)); + break; + + case VMCommands.VM_MOVSX: + SetValue(false, Mem, op1, (byte)GetValue(true, Mem, op2)); + break; + + case VMCommands.VM_XCHG: + { + int value1 = GetValue(cmd.IsByteMode, Mem, op1); + SetValue(cmd.IsByteMode, Mem, op1, GetValue(cmd.IsByteMode, Mem, op2)); + SetValue(cmd.IsByteMode, Mem, op2, value1); + } + break; + + case VMCommands.VM_MUL: + { + int result = + (int) + ((GetValue(cmd.IsByteMode, Mem, op1) & + 0xFFffFFff * GetValue(cmd.IsByteMode, Mem, op2) & unchecked((int)0xFFffFFff)) & + unchecked((int)0xFFffFFff)); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_DIV: + { + int divider = GetValue(cmd.IsByteMode, Mem, op2); + if (divider != 0) + { + int result = GetValue(cmd.IsByteMode, Mem, op1) / divider; + SetValue(cmd.IsByteMode, Mem, op1, result); + } + } + break; + + case VMCommands.VM_ADC: + { + int value1 = GetValue(cmd.IsByteMode, Mem, op1); + int FC = (int)(flags & VMFlags.VM_FC); + int result = + (int) + (value1 & 0xFFffFFff + GetValue(cmd.IsByteMode, 
Mem, op2) & + 0xFFffFFff + FC & unchecked((int)0xFFffFFff)); + if (cmd.IsByteMode) + { + result &= 0xff; + } + + flags = + (VMFlags) + ((result < value1 || result == value1 && FC != 0) + ? 1 + : 0 | (result == 0 ? (int)VMFlags.VM_FZ : (result & (int)VMFlags.VM_FS))); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_SBB: + { + int value1 = GetValue(cmd.IsByteMode, Mem, op1); + int FC = (int)(flags & VMFlags.VM_FC); + int result = + (int) + (value1 & 0xFFffFFff - GetValue(cmd.IsByteMode, Mem, op2) & + 0xFFffFFff - FC & unchecked((int)0xFFffFFff)); + if (cmd.IsByteMode) + { + result &= 0xff; + } + flags = + (VMFlags) + ((result > value1 || result == value1 && FC != 0) + ? 1 + : 0 | (result == 0 ? (int)VMFlags.VM_FZ : (result & (int)VMFlags.VM_FS))); + SetValue(cmd.IsByteMode, Mem, op1, result); + } + break; + + case VMCommands.VM_RET: + if (R[7] >= VM_MEMSIZE) + { + return (true); + } + setIP(GetValue(false, Mem, R[7] & VM_MEMMASK)); + R[7] += 4; + continue; + + case VMCommands.VM_STANDARD: + ExecuteStandardFilter((VMStandardFilters)(cmd.Op1.Data)); + break; + + case VMCommands.VM_PRINT: + break; + } + IP++; + --maxOpCount; + } + } + + public void prepare(byte[] code, int codeSize, VMPreparedProgram prg) + { + InitBitInput(); + int cpLength = Math.Min(MAX_SIZE, codeSize); + + // memcpy(inBuf,Code,Min(CodeSize,BitInput::MAX_SIZE)); + + Buffer.BlockCopy(code, 0, InBuf, 0, cpLength); + byte xorSum = 0; + for (int i = 1; i < codeSize; i++) + { + xorSum ^= code[i]; + } + + AddBits(8); + + prg.CommandCount = 0; + if (xorSum == code[0]) + { + VMStandardFilters filterType = IsStandardFilter(code, codeSize); + if (filterType != VMStandardFilters.VMSF_NONE) + { + VMPreparedCommand curCmd = new VMPreparedCommand(); + curCmd.OpCode = VMCommands.VM_STANDARD; + curCmd.Op1.Data = (int)filterType; + curCmd.Op1.Type = VMOpType.VM_OPNONE; + curCmd.Op2.Type = VMOpType.VM_OPNONE; + codeSize = 0; + prg.Commands.Add(curCmd); + prg.CommandCount = 
prg.CommandCount + 1; + + // TODO + // curCmd->Op1.Data=FilterType; + // >>>>>> CurCmd->Op1.Addr=&CurCmd->Op1.Data; <<<<<<<<<< not set + // do i need to ? + // >>>>>> CurCmd->Op2.Addr=&CurCmd->Op2.Data; <<<<<<<<<< " + // CurCmd->Op1.Type=CurCmd->Op2.Type=VM_OPNONE; + // CodeSize=0; + } + int dataFlag = GetBits(); + AddBits(1); + + // Read static data contained in DB operators. This data cannot be + // changed, + // it is a part of VM code, not a filter parameter. + + if ((dataFlag & 0x8000) != 0) + { + long dataSize = ReadData(this) & 0xffFFffFFL + 1L; + for (int i = 0; inAddr < codeSize && i < dataSize; i++) + { + prg.StaticData.Add((byte)(GetBits() >> 8)); + AddBits(8); + } + } + + while (inAddr < codeSize) + { + VMPreparedCommand curCmd = new VMPreparedCommand(); + int data = GetBits(); + if ((data & 0x8000) == 0) + { + curCmd.OpCode = (VMCommands)((data >> 12)); + AddBits(4); + } + else + { + curCmd.OpCode = (VMCommands)((data >> 10) - 24); + AddBits(6); + } + if ((VMCmdFlags.VM_CmdFlags[(int)curCmd.OpCode] & VMCmdFlags.VMCF_BYTEMODE) != 0) + { + curCmd.IsByteMode = (GetBits() >> 15) == 1 ? 
true : false; + AddBits(1); + } + else + { + curCmd.IsByteMode = false; + } + curCmd.Op1.Type = VMOpType.VM_OPNONE; + curCmd.Op2.Type = VMOpType.VM_OPNONE; + + int opNum = (VMCmdFlags.VM_CmdFlags[(int)curCmd.OpCode] & VMCmdFlags.VMCF_OPMASK); + + // TODO >>> CurCmd->Op1.Addr=CurCmd->Op2.Addr=NULL; << 0) + { + decodeArg(curCmd.Op1, curCmd.IsByteMode); + if (opNum == 2) + { + decodeArg(curCmd.Op2, curCmd.IsByteMode); + } + else + { + if (curCmd.Op1.Type == VMOpType.VM_OPINT && + (VMCmdFlags.VM_CmdFlags[(int)curCmd.OpCode] & + (VMCmdFlags.VMCF_JUMP | VMCmdFlags.VMCF_PROC)) != 0) + { + int distance = curCmd.Op1.Data; + if (distance >= 256) + { + distance -= 256; + } + else + { + if (distance >= 136) + { + distance -= 264; + } + else + { + if (distance >= 16) + { + distance -= 8; + } + else + { + if (distance >= 8) + { + distance -= 16; + } + } + } + distance += prg.CommandCount; + } + curCmd.Op1.Data = distance; + } + } + } + prg.CommandCount = (prg.CommandCount + 1); + prg.Commands.Add(curCmd); + } + } + VMPreparedCommand curCmd2 = new VMPreparedCommand(); + curCmd2.OpCode = VMCommands.VM_RET; + + // TODO CurCmd->Op1.Addr=&CurCmd->Op1.Data; + // CurCmd->Op2.Addr=&CurCmd->Op2.Data; + curCmd2.Op1.Type = VMOpType.VM_OPNONE; + curCmd2.Op2.Type = VMOpType.VM_OPNONE; + + // for (int i=0;iCmd[I]; + // if (Cmd->Op1.Addr==NULL) + // Cmd->Op1.Addr=&Cmd->Op1.Data; + // if (Cmd->Op2.Addr==NULL) + // Cmd->Op2.Addr=&Cmd->Op2.Data; + // } + + prg.Commands.Add(curCmd2); + prg.CommandCount = prg.CommandCount + 1; + + // #ifdef VM_OPTIMIZE + if (codeSize != 0) + { + optimize(prg); + } + } + + private void decodeArg(VMPreparedOperand op, bool byteMode) + { + int data = GetBits(); + if ((data & 0x8000) != 0) + { + op.Type = VMOpType.VM_OPREG; + op.Data = (data >> 12) & 7; + op.Offset = op.Data; + AddBits(4); + } + else + { + if ((data & 0xc000) == 0) + { + op.Type = VMOpType.VM_OPINT; + if (byteMode) + { + op.Data = (data >> 6) & 0xff; + AddBits(10); + } + else + { + AddBits(2); + 
op.Data = ReadData(this); + } + } + else + { + op.Type = VMOpType.VM_OPREGMEM; + if ((data & 0x2000) == 0) + { + op.Data = (data >> 10) & 7; + op.Offset = op.Data; + op.Base = 0; + AddBits(6); + } + else + { + if ((data & 0x1000) == 0) + { + op.Data = (data >> 9) & 7; + op.Offset = op.Data; + AddBits(7); + } + else + { + op.Data = 0; + AddBits(4); + } + op.Base = ReadData(this); + } + } + } + } + + private void optimize(VMPreparedProgram prg) + { + //UPGRADE_NOTE: There is an untranslated Statement. Please refer to original code. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1153'" + + List commands = prg.Commands; + + //UPGRADE_ISSUE: The following fragment of code could not be parsed and was not converted. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1156'" + foreach (VMPreparedCommand cmd in commands) + { + switch (cmd.OpCode) + { + case VMCommands.VM_MOV: + cmd.OpCode = cmd.IsByteMode ? VMCommands.VM_MOVB : VMCommands.VM_MOVD; + continue; + + case VMCommands.VM_CMP: + cmd.OpCode = cmd.IsByteMode ? VMCommands.VM_CMPB : VMCommands.VM_CMPD; + continue; + } + if ((VMCmdFlags.VM_CmdFlags[(int)cmd.OpCode] & VMCmdFlags.VMCF_CHFLAGS) == 0) + { + continue; + } + bool flagsRequired = false; + + for (int i = commands.IndexOf(cmd) + 1; i < commands.Count; i++) + { + int flags = VMCmdFlags.VM_CmdFlags[(int)commands[i].OpCode]; + if ((flags & (VMCmdFlags.VMCF_JUMP | VMCmdFlags.VMCF_PROC | VMCmdFlags.VMCF_USEFLAGS)) != 0) + { + flagsRequired = true; + break; + } + if ((flags & VMCmdFlags.VMCF_CHFLAGS) != 0) + { + break; + } + } + if (flagsRequired) + { + continue; + } + switch (cmd.OpCode) + { + case VMCommands.VM_ADD: + cmd.OpCode = cmd.IsByteMode ? VMCommands.VM_ADDB : VMCommands.VM_ADDD; + continue; + + case VMCommands.VM_SUB: + cmd.OpCode = cmd.IsByteMode ? 
VMCommands.VM_SUBB : VMCommands.VM_SUBD; + continue; + + case VMCommands.VM_INC: + cmd.OpCode = cmd.IsByteMode ? VMCommands.VM_INCB : VMCommands.VM_INCD; + continue; + + case VMCommands.VM_DEC: + cmd.OpCode = cmd.IsByteMode ? VMCommands.VM_DECB : VMCommands.VM_DECD; + continue; + + case VMCommands.VM_NEG: + cmd.OpCode = cmd.IsByteMode ? VMCommands.VM_NEGB : VMCommands.VM_NEGD; + continue; + } + } + } + + internal static int ReadData(BitInput rarVM) + { + int data = rarVM.GetBits(); + switch (data & 0xc000) + { + case 0: + rarVM.AddBits(6); + return ((data >> 10) & 0xf); + + case 0x4000: + if ((data & 0x3c00) == 0) + { + data = unchecked((int)0xffffff00) | ((data >> 2) & 0xff); + rarVM.AddBits(14); + } + else + { + data = (data >> 6) & 0xff; + rarVM.AddBits(10); + } + return (data); + + case 0x8000: + rarVM.AddBits(2); + data = rarVM.GetBits(); + rarVM.AddBits(16); + return (data); + + default: + rarVM.AddBits(2); + data = (rarVM.GetBits() << 16); + rarVM.AddBits(16); + data |= rarVM.GetBits(); + rarVM.AddBits(16); + return (data); + } + } + + private VMStandardFilters IsStandardFilter(byte[] code, int codeSize) + { + VMStandardFilterSignature[] stdList = + { + new VMStandardFilterSignature(53, 0xad576887, + VMStandardFilters.VMSF_E8), + new VMStandardFilterSignature(57, 0x3cd7e57e, + VMStandardFilters.VMSF_E8E9), + new VMStandardFilterSignature(120, 0x3769893f, + VMStandardFilters.VMSF_ITANIUM), + new VMStandardFilterSignature(29, 0x0e06077d, + VMStandardFilters.VMSF_DELTA), + new VMStandardFilterSignature(149, 0x1c2c5dc8, + VMStandardFilters.VMSF_RGB), + new VMStandardFilterSignature(216, 0xbc85e701, + VMStandardFilters.VMSF_AUDIO), + new VMStandardFilterSignature(40, 0x46b9c560, + VMStandardFilters.VMSF_UPCASE) + }; + uint CodeCRC = RarCRC.CheckCrc(0xffffffff, code, 0, code.Length) ^ 0xffffffff; + for (int i = 0; i < stdList.Length; i++) + { + if (stdList[i].CRC == CodeCRC && stdList[i].Length == code.Length) + { + return (stdList[i].Type); + } + } + return 
(VMStandardFilters.VMSF_NONE); + } + + private void ExecuteStandardFilter(VMStandardFilters filterType) + { + switch (filterType) + { + case VMStandardFilters.VMSF_E8: + case VMStandardFilters.VMSF_E8E9: + { + int dataSize = R[4]; + long fileOffset = R[6] & unchecked((int)0xFFffFFff); + + if (dataSize >= VM_GLOBALMEMADDR) + { + break; + } + int fileSize = 0x1000000; + byte cmpByte2 = (byte)((filterType == VMStandardFilters.VMSF_E8E9) ? 0xe9 : 0xe8); + for (int curPos = 0; curPos < dataSize - 4;) + { + byte curByte = Mem[curPos++]; + if (curByte == 0xe8 || curByte == cmpByte2) + { + // #ifdef PRESENT_INT32 + // sint32 Offset=CurPos+FileOffset; + // sint32 Addr=GET_VALUE(false,Data); + // if (Addr<0) + // { + // if (Addr+Offset>=0) + // SET_VALUE(false,Data,Addr+FileSize); + // } + // else + // if (Addr= VM_GLOBALMEMADDR) + { + break; + } + int curPos = 0; + + //UPGRADE_NOTE: Final was removed from the declaration of 'Masks '. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" + byte[] Masks = {4, 4, 6, 6, 0, 0, 7, 7, 4, 4, 0, 0, 4, 4, 0, 0}; + fileOffset = Utility.URShift(fileOffset, 4); + + while (curPos < dataSize - 21) + { + int Byte = (Mem[curPos] & 0x1f) - 0x10; + if (Byte >= 0) + { + byte cmdMask = Masks[Byte]; + if (cmdMask != 0) + { + for (int i = 0; i <= 2; i++) + { + if ((cmdMask & (1 << i)) != 0) + { + int startPos = i * 41 + 5; + int opType = filterItanium_GetBits(curPos, startPos + 37, 4); + if (opType == 5) + { + int offset = filterItanium_GetBits(curPos, startPos + 13, 20); + filterItanium_SetBits(curPos, (int)(offset - fileOffset) & 0xfffff, + startPos + 13, 20); + } + } + } + } + } + curPos += 16; + fileOffset++; + } + } + break; + + case VMStandardFilters.VMSF_DELTA: + { + int dataSize = R[4] & unchecked((int)0xFFffFFff); + int channels = R[0] & unchecked((int)0xFFffFFff); + int srcPos = 0; + int border = (dataSize * 2) & unchecked((int)0xFFffFFff); + SetValue(false, Mem, VM_GLOBALMEMADDR + 
0x20, dataSize); + if (dataSize >= VM_GLOBALMEMADDR / 2) + { + break; + } + + // bytes from same channels are grouped to continual data blocks, + // so we need to place them back to their interleaving positions + + for (int curChannel = 0; curChannel < channels; curChannel++) + { + byte PrevByte = 0; + for (int destPos = dataSize + curChannel; destPos < border; destPos += channels) + { + Mem[destPos] = (PrevByte = (byte)(PrevByte - Mem[srcPos++])); + } + } + } + break; + + case VMStandardFilters.VMSF_RGB: + { + // byte *SrcData=Mem,*DestData=SrcData+DataSize; + int dataSize = R[4], width = R[0] - 3, posR = R[1]; + int channels = 3; + int srcPos = 0; + int destDataPos = dataSize; + SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize); + if (dataSize >= VM_GLOBALMEMADDR / 2 || posR < 0) + { + break; + } + for (int curChannel = 0; curChannel < channels; curChannel++) + { + long prevByte = 0; + + for (int i = curChannel; i < dataSize; i += channels) + { + long predicted; + int upperPos = i - width; + if (upperPos >= 3) + { + int upperDataPos = destDataPos + upperPos; + int upperByte = Mem[upperDataPos] & 0xff; + int upperLeftByte = Mem[upperDataPos - 3] & 0xff; + predicted = prevByte + upperByte - upperLeftByte; + int pa = Math.Abs((int)(predicted - prevByte)); + int pb = Math.Abs((int)(predicted - upperByte)); + int pc = Math.Abs((int)(predicted - upperLeftByte)); + if (pa <= pb && pa <= pc) + { + predicted = prevByte; + } + else + { + if (pb <= pc) + { + predicted = upperByte; + } + else + { + predicted = upperLeftByte; + } + } + } + else + { + predicted = prevByte; + } + + prevByte = (predicted - Mem[srcPos++] & 0xff) & 0xff; + Mem[destDataPos + i] = (byte)(prevByte & 0xff); + } + } + for (int i = posR, border = dataSize - 2; i < border; i += 3) + { + byte G = Mem[destDataPos + i + 1]; + Mem[destDataPos + i] = (byte)(Mem[destDataPos + i] + G); + Mem[destDataPos + i + 2] = (byte)(Mem[destDataPos + i + 2] + G); + } + } + break; + + case 
VMStandardFilters.VMSF_AUDIO: + { + int dataSize = R[4], channels = R[0]; + int srcPos = 0; + int destDataPos = dataSize; + + //byte *SrcData=Mem,*DestData=SrcData+DataSize; + SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize); + if (dataSize >= VM_GLOBALMEMADDR / 2) + { + break; + } + for (int curChannel = 0; curChannel < channels; curChannel++) + { + long prevByte = 0; + long prevDelta = 0; + long[] Dif = new long[7]; + int D1 = 0, D2 = 0, D3; + int K1 = 0, K2 = 0, K3 = 0; + + for (int i = curChannel, byteCount = 0; i < dataSize; i += channels, byteCount++) + { + D3 = D2; + D2 = (int)(prevDelta - D1); + D1 = (int)prevDelta; + + long predicted = 8 * prevByte + K1 * D1 + K2 * D2 + K3 * D3; + predicted = Utility.URShift(predicted, 3) & 0xff; + + long curByte = Mem[srcPos++]; + + predicted -= curByte; + Mem[destDataPos + i] = (byte)predicted; + prevDelta = (byte)(predicted - prevByte); + + //fix java byte + if (prevDelta >= 128) + { + prevDelta = 0 - (256 - prevDelta); + } + prevByte = predicted; + + //fix java byte + if (curByte >= 128) + { + curByte = 0 - (256 - curByte); + } + int D = ((int)curByte) << 3; + + Dif[0] += Math.Abs(D); + Dif[1] += Math.Abs(D - D1); + Dif[2] += Math.Abs(D + D1); + Dif[3] += Math.Abs(D - D2); + Dif[4] += Math.Abs(D + D2); + Dif[5] += Math.Abs(D - D3); + Dif[6] += Math.Abs(D + D3); + + if ((byteCount & 0x1f) == 0) + { + long minDif = Dif[0], numMinDif = 0; + Dif[0] = 0; + for (int j = 1; j < Dif.Length; j++) + { + if (Dif[j] < minDif) + { + minDif = Dif[j]; + numMinDif = j; + } + Dif[j] = 0; + } + switch ((int)numMinDif) + { + case 1: + if (K1 >= -16) + { + K1--; + } + break; + + case 2: + if (K1 < 16) + { + K1++; + } + break; + + case 3: + if (K2 >= -16) + { + K2--; + } + break; + + case 4: + if (K2 < 16) + { + K2++; + } + break; + + case 5: + if (K3 >= -16) + { + K3--; + } + break; + + case 6: + if (K3 < 16) + { + K3++; + } + break; + } + } + } + } + } + break; + + case VMStandardFilters.VMSF_UPCASE: + { + int dataSize = R[4], 
srcPos = 0, destPos = dataSize; + if (dataSize >= VM_GLOBALMEMADDR / 2) + { + break; + } + while (srcPos < dataSize) + { + byte curByte = Mem[srcPos++]; + if (curByte == 2 && (curByte = Mem[srcPos++]) != 2) + { + curByte = (byte)(curByte - 32); + } + Mem[destPos++] = curByte; + } + SetValue(false, Mem, VM_GLOBALMEMADDR + 0x1c, destPos - dataSize); + SetValue(false, Mem, VM_GLOBALMEMADDR + 0x20, dataSize); + } + break; + } + } + + private void filterItanium_SetBits(int curPos, int bitField, int bitPos, int bitCount) + { + int inAddr = bitPos / 8; + int inBit = bitPos & 7; + int andMask = Utility.URShift(unchecked((int)0xffffffff), (32 - bitCount)); + andMask = ~(andMask << inBit); + + bitField <<= inBit; + + for (int i = 0; i < 4; i++) + { + Mem[curPos + inAddr + i] &= (byte)(andMask); + Mem[curPos + inAddr + i] |= (byte)(bitField); + andMask = (Utility.URShift(andMask, 8)) | unchecked((int)0xff000000); + bitField = Utility.URShift(bitField, 8); + } + } + + private int filterItanium_GetBits(int curPos, int bitPos, int bitCount) + { + int inAddr = bitPos / 8; + int inBit = bitPos & 7; + int bitField = Mem[curPos + inAddr++] & 0xff; + bitField |= (Mem[curPos + inAddr++] & 0xff) << 8; + bitField |= (Mem[curPos + inAddr++] & 0xff) << 16; + bitField |= (Mem[curPos + inAddr] & 0xff) << 24; + bitField = Utility.URShift(bitField, inBit); + return (bitField & (Utility.URShift(unchecked((int)0xffffffff), (32 - bitCount)))); + } + + public virtual void setMemory(int pos, byte[] data, int offset, int dataSize) + { + if (pos < VM_MEMSIZE) + { + //&& data!=Mem+Pos) + //memmove(Mem+Pos,Data,Min(DataSize,VM_MEMSIZE-Pos)); + for (int i = 0; i < Math.Min(data.Length - offset, dataSize); i++) + { + if ((VM_MEMSIZE - pos) < i) + { + break; + } + Mem[pos + i] = data[offset + i]; + } + } + } + } + + // +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCmdFlags.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCmdFlags.cs new 
file mode 100644 index 0000000000..55ae8dfe2d --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCmdFlags.cs @@ -0,0 +1,46 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal class VMCmdFlags + { + public const byte VMCF_OP0 = 0; + public const byte VMCF_OP1 = 1; + public const byte VMCF_OP2 = 2; + public const byte VMCF_OPMASK = 3; + public const byte VMCF_BYTEMODE = 4; + public const byte VMCF_JUMP = 8; + public const byte VMCF_PROC = 16; + public const byte VMCF_USEFLAGS = 32; + public const byte VMCF_CHFLAGS = 64; + + public static byte[] VM_CmdFlags = + { + VMCF_OP2 | VMCF_BYTEMODE, VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, + VMCF_OP1 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP1 | VMCF_BYTEMODE | VMCF_CHFLAGS, VMCF_OP1 | VMCF_JUMP, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, + VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS, VMCF_OP1, VMCF_OP1, + VMCF_OP1 | VMCF_PROC, VMCF_OP0 | VMCF_PROC, VMCF_OP1 | VMCF_BYTEMODE, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS, + VMCF_OP1 | VMCF_BYTEMODE | VMCF_CHFLAGS, VMCF_OP0, VMCF_OP0, + VMCF_OP0 | VMCF_USEFLAGS, VMCF_OP0 | VMCF_CHFLAGS, VMCF_OP2, VMCF_OP2, + VMCF_OP2 | VMCF_BYTEMODE, VMCF_OP2 | VMCF_BYTEMODE, + VMCF_OP2 | VMCF_BYTEMODE, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_USEFLAGS | VMCF_CHFLAGS, + VMCF_OP2 | VMCF_BYTEMODE | VMCF_USEFLAGS | VMCF_CHFLAGS, VMCF_OP0 + }; + } +} \ No newline at end of file diff --git 
a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCommands.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCommands.cs new file mode 100644 index 0000000000..5164c4c5f1 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMCommands.cs @@ -0,0 +1,78 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal enum VMCommands + { + VM_MOV = 0, + VM_CMP = 1, + VM_ADD = 2, + VM_SUB = 3, + VM_JZ = 4, + VM_JNZ = 5, + VM_INC = 6, + + VM_DEC = + 7, + VM_JMP = 8, + VM_XOR = 9, + VM_AND = 10, + VM_OR = 11, + VM_TEST = 12, + + VM_JS = + 13, + VM_JNS = 14, + VM_JB = 15, + VM_JBE = 16, + VM_JA = 17, + VM_JAE = 18, + + VM_PUSH = + 19, + VM_POP = 20, + VM_CALL = 21, + VM_RET = 22, + VM_NOT = 23, + VM_SHL = 24, + + VM_SHR = + 25, + VM_SAR = 26, + VM_NEG = 27, + VM_PUSHA = 28, + VM_POPA = 29, + VM_PUSHF = 30, + + VM_POPF = + 31, + VM_MOVZX = 32, + VM_MOVSX = 33, + VM_XCHG = 34, + VM_MUL = 35, + VM_DIV = 36, + + VM_ADC = + 37, + VM_SBB = 38, + VM_PRINT = 39, + + VM_MOVB = 40, + VM_MOVD = 41, + VM_CMPB = 42, + VM_CMPD = 43, + + VM_ADDB = 44, + VM_ADDD = 45, + VM_SUBB = 46, + VM_SUBD = 47, + VM_INCB = 48, + VM_INCD = 49, + + VM_DECB = + 50, + VM_DECD = 51, + VM_NEGB = 52, + VM_NEGD = 53, + + VM_STANDARD = 54 + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMFlags.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMFlags.cs new file mode 100644 index 0000000000..f0c8fd72ae --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMFlags.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal enum VMFlags + { + None = 0, + VM_FC = 1, + VM_FZ = 2, + VM_FS = 80000000 + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMOpType.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMOpType.cs new file mode 100644 index 0000000000..9ea99d1ac0 --- /dev/null +++ 
b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMOpType.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal enum VMOpType + { + VM_OPREG = 0, + VM_OPINT = 1, + VM_OPREGMEM = 2, + VM_OPNONE = 3 + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedCommand.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedCommand.cs new file mode 100644 index 0000000000..fc3238b5b2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedCommand.cs @@ -0,0 +1,17 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal class VMPreparedCommand + { + internal VMPreparedCommand() + { + Op1 = new VMPreparedOperand(); + Op2 = new VMPreparedOperand(); + } + + internal VMCommands OpCode { get; set; } + internal bool IsByteMode { get; set; } + internal VMPreparedOperand Op1 { get; } + + internal VMPreparedOperand Op2 { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedOperand.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedOperand.cs new file mode 100644 index 0000000000..b8cb10d96e --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedOperand.cs @@ -0,0 +1,10 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal class VMPreparedOperand + { + internal VMOpType Type { get; set; } + internal int Data { get; set; } + internal int Base { get; set; } + internal int Offset { get; set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedProgram.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedProgram.cs new file mode 100644 index 0000000000..94da8a76bd --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMPreparedProgram.cs @@ -0,0 +1,21 @@ +using System.Collections.Generic; + +namespace SharpCompress.Compressors.Rar.VM +{ 
+ internal class VMPreparedProgram + { + internal List Commands = new List(); + internal List AltCommands = new List(); + + public int CommandCount { get; set; } + + internal List GlobalData = new List(); + internal List StaticData = new List(); + + // static data contained in DB operators + internal int[] InitR = new int[7]; + + internal int FilteredDataOffset { get; set; } + internal int FilteredDataSize { get; set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilterSignature.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilterSignature.cs new file mode 100644 index 0000000000..a5812ed026 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilterSignature.cs @@ -0,0 +1,18 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal class VMStandardFilterSignature + { + internal VMStandardFilterSignature(int length, uint crc, VMStandardFilters type) + { + Length = length; + CRC = crc; + Type = type; + } + + internal int Length { get; } + + internal uint CRC { get; } + + internal VMStandardFilters Type { get; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilters.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilters.cs new file mode 100644 index 0000000000..07f743486a --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Compressors/Rar/VM/VMStandardFilters.cs @@ -0,0 +1,14 @@ +namespace SharpCompress.Compressors.Rar.VM +{ + internal enum VMStandardFilters + { + VMSF_NONE = 0, + VMSF_E8 = 1, + VMSF_E8E9 = 2, + VMSF_ITANIUM = 3, + VMSF_RGB = 4, + VMSF_AUDIO = 5, + VMSF_DELTA = 6, + VMSF_UPCASE = 7 + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Compressors/Xz/BinaryUtils.cs b/BizHawk.Client.Common/SharpCompress/Compressors/Xz/BinaryUtils.cs new file mode 100644 index 0000000000..f63770befc --- /dev/null +++ 
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Little-endian 32-bit integer readers for <see cref="BinaryReader"/> and
    /// <see cref="Stream"/>, plus helpers that emit a uint in a fixed byte
    /// order regardless of host endianness.
    /// </summary>
    public static class BinaryUtils
    {
        /// <summary>Reads 4 bytes and assembles them LSB-first.</summary>
        public static int ReadLittleEndianInt32(this BinaryReader reader)
        {
            byte[] raw = reader.ReadBytes(4);
            // OR of disjoint byte lanes is equivalent to the summed-shift form.
            return raw[0] | (raw[1] << 8) | (raw[2] << 16) | (raw[3] << 24);
        }

        internal static uint ReadLittleEndianUInt32(this BinaryReader reader)
            => unchecked((uint)ReadLittleEndianInt32(reader));

        /// <summary>
        /// Reads 4 bytes from the stream LSB-first; throws
        /// <see cref="EndOfStreamException"/> if fewer than 4 remain.
        /// </summary>
        public static int ReadLittleEndianInt32(this Stream stream)
        {
            var raw = new byte[4];
            if (!stream.ReadFully(raw))
            {
                throw new EndOfStreamException();
            }
            return raw[0] | (raw[1] << 8) | (raw[2] << 16) | (raw[3] << 24);
        }

        internal static uint ReadLittleEndianUInt32(this Stream stream)
            => unchecked((uint)ReadLittleEndianInt32(stream));

        /// <summary>Returns the big-endian byte representation of the value.</summary>
        internal static byte[] ToBigEndianBytes(this uint uint32)
        {
            var bytes = BitConverter.GetBytes(uint32);
            if (BitConverter.IsLittleEndian)
            {
                Array.Reverse(bytes);
            }
            return bytes;
        }

        /// <summary>Returns the little-endian byte representation of the value.</summary>
        internal static byte[] ToLittleEndianBytes(this uint uint32)
        {
            var bytes = BitConverter.GetBytes(uint32);
            if (!BitConverter.IsLittleEndian)
            {
                Array.Reverse(bytes);
            }
            return bytes;
        }
    }
}
using System;
using System.Collections.Generic;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Table-driven CRC-32 (reflected, IEEE 802.3 polynomial 0xEDB88320).
    /// <see cref="Compute(byte[])"/> produces the standard CRC-32 check value
    /// (0xCBF43926 for the ASCII bytes of "123456789").
    /// </summary>
    internal static class Crc32
    {
        public const UInt32 DefaultPolynomial = 0xedb88320u;
        public const UInt32 DefaultSeed = 0xffffffffu;

        // Lazily-built table for the default polynomial. The unsynchronized
        // initialization race is benign: concurrent callers may each build a
        // table, but every table built is identical.
        private static UInt32[] defaultTable;

        /// <summary>CRC-32 of <paramref name="buffer"/> with the default seed and polynomial.</summary>
        public static UInt32 Compute(byte[] buffer)
        {
            return Compute(DefaultSeed, buffer);
        }

        /// <summary>CRC-32 of <paramref name="buffer"/> with an explicit seed.</summary>
        public static UInt32 Compute(UInt32 seed, byte[] buffer)
        {
            return Compute(DefaultPolynomial, seed, buffer);
        }

        /// <summary>CRC-32 of <paramref name="buffer"/> with explicit polynomial and seed.</summary>
        public static UInt32 Compute(UInt32 polynomial, UInt32 seed, byte[] buffer)
        {
            return ~CalculateHash(InitializeTable(polynomial), seed, buffer, 0, buffer.Length);
        }

        /// <summary>Builds (and caches, for the default polynomial) the 256-entry lookup table.</summary>
        private static UInt32[] InitializeTable(UInt32 polynomial)
        {
            if (polynomial == DefaultPolynomial && defaultTable != null)
            {
                return defaultTable;
            }

            var createTable = new UInt32[256];
            for (var i = 0; i < 256; i++)
            {
                var entry = (UInt32)i;
                for (var j = 0; j < 8; j++)
                {
                    if ((entry & 1) == 1)
                    {
                        entry = (entry >> 1) ^ polynomial;
                    }
                    else
                    {
                        entry = entry >> 1;
                    }
                }
                createTable[i] = entry;
            }

            if (polynomial == DefaultPolynomial)
            {
                defaultTable = createTable;
            }

            return createTable;
        }

        /// <summary>
        /// Core CRC loop over buffer[start .. start + size). Returns the raw
        /// (non-inverted) register; callers apply the final complement.
        /// </summary>
        private static UInt32 CalculateHash(UInt32[] table, UInt32 seed, IList<byte> buffer, int start, int size)
        {
            var crc = seed;
            // BUG FIX: this previously looped "i < size - start", which only
            // covers the intended window when start == 0 (the sole in-file
            // use) and walks the wrong range for any non-zero start.
            for (var i = start; i < start + size; i++)
            {
                crc = (crc >> 8) ^ table[buffer[i] ^ crc & 0xff];
            }
            return crc;
        }
    }
}
using System;
using System.Collections.Generic;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Table-driven CRC-64 using the reflected ISO-3309 polynomial
    /// (0xD800000000000000), zero default seed and no output inversion.
    /// </summary>
    internal static class Crc64
    {
        public const UInt64 DefaultSeed = 0x0;

        // Shared lookup table, built lazily on first Compute call. Kept as a
        // visible field to preserve the original surface.
        internal static UInt64[] Table;

        public const UInt64 Iso3309Polynomial = 0xD800000000000000;

        /// <summary>CRC-64 of the whole buffer with the default (zero) seed.</summary>
        public static UInt64 Compute(byte[] buffer)
        {
            return Compute(DefaultSeed, buffer);
        }

        /// <summary>CRC-64 of the whole buffer with an explicit seed.</summary>
        public static UInt64 Compute(UInt64 seed, byte[] buffer)
        {
            if (Table == null)
            {
                Table = CreateTable(Iso3309Polynomial);
            }
            return CalculateHash(seed, Table, buffer, 0, buffer.Length);
        }

        /// <summary>Core CRC loop over buffer[start .. size).</summary>
        public static UInt64 CalculateHash(UInt64 seed, UInt64[] table, IList<byte> buffer, int start, int size)
        {
            var register = seed;
            for (var pos = start; pos < size; pos++)
            {
                unchecked
                {
                    register = (register >> 8) ^ table[(buffer[pos] ^ register) & 0xff];
                }
            }
            return register;
        }

        /// <summary>Builds the 256-entry table for a reflected polynomial.</summary>
        public static ulong[] CreateTable(ulong polynomial)
        {
            var table = new UInt64[256];
            for (var b = 0; b < 256; ++b)
            {
                var entry = (UInt64)b;
                for (var bit = 0; bit < 8; ++bit)
                {
                    entry = (entry & 1) == 1 ? (entry >> 1) ^ polynomial : entry >> 1;
                }
                table[b] = entry;
            }
            return table;
        }
    }
}
using System;
using System.IO;
using SharpCompress.Compressors.LZMA;

namespace SharpCompress.Compressors.Xz.Filters
{
    /// <summary>
    /// The LZMA2 block filter (XZ filter id 0x21). Only valid as the last
    /// filter in a chain; wraps the base stream in an <see cref="LzmaStream"/>.
    /// </summary>
    internal class Lzma2Filter : BlockFilter
    {
        public override bool AllowAsLast => true;
        public override bool AllowAsNonLast => false;
        public override bool ChangesDataSize => true;

        // Encoded dictionary-size property byte (low 6 bits of the filter properties).
        private byte _dictionarySize;

        /// <summary>
        /// Decodes the dictionary size: mantissa 2|(v&amp;1) shifted left by
        /// v/2 + 11; the value 40 means uint.MaxValue and anything larger
        /// would overflow a UInt32.
        /// </summary>
        public uint DictionarySize
        {
            get
            {
                if (_dictionarySize > 40)
                {
                    throw new OverflowException("Dictionary size greater than UInt32.Max");
                }
                if (_dictionarySize == 40)
                {
                    return uint.MaxValue;
                }
                int mantissa = 2 | (_dictionarySize & 1);
                int exponent = _dictionarySize / 2 + 11;
                return (uint)mantissa << exponent;
            }
        }

        public override void Init(byte[] properties)
        {
            if (properties.Length != 1)
            {
                throw new InvalidDataException("LZMA properties unexpected length");
            }
            _dictionarySize = (byte)(properties[0] & 0x3F);
            // The top two bits of the property byte are reserved and must be zero.
            if ((properties[0] & 0xC0) != 0)
            {
                throw new InvalidDataException("Reserved bits used in LZMA properties");
            }
        }

        public override void ValidateFilter()
        {
            // No cross-filter validation is required for LZMA2.
        }

        public override void SetBaseStream(Stream stream)
        {
            // LzmaStream consumes the single raw property byte directly.
            BaseStream = new LzmaStream(new[] { _dictionarySize }, stream);
        }

        public override int Read(byte[] buffer, int offset, int count)
        {
            return BaseStream.Read(buffer, offset, count);
        }

        public override int ReadByte()
        {
            return BaseStream.ReadByte();
        }
    }
}
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Reader for the XZ variable-length integer encoding: 7 payload bits per
    /// byte, least-significant group first, with the high bit of each byte
    /// flagging a continuation.
    /// </summary>
    internal static class MultiByteIntegers
    {
        /// <summary>
        /// Reads one XZ multibyte integer, consuming at most
        /// <paramref name="MaxBytes"/> bytes (clamped to the format maximum
        /// of 9). Throws <see cref="InvalidDataException"/> on overlong input
        /// or a non-canonical zero continuation byte.
        /// </summary>
        public static ulong ReadXZInteger(this BinaryReader reader, int MaxBytes = 9)
        {
            if (MaxBytes <= 0)
            {
                throw new ArgumentOutOfRangeException();
            }
            if (MaxBytes > 9)
            {
                MaxBytes = 9;
            }

            byte current = reader.ReadByte();
            ulong value = (ulong)current & 0x7F;

            int consumed = 0;
            while ((current & 0x80) != 0)
            {
                if (++consumed >= MaxBytes)
                {
                    throw new InvalidDataException();
                }
                current = reader.ReadByte();
                if (current == 0)
                {
                    throw new InvalidDataException();
                }
                value |= ((ulong)(current & 0x7F)) << (consumed * 7);
            }
            return value;
        }
    }
}
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Base class for forward-only decoder streams: exposes a wrapped
    /// <see cref="BaseStream"/> and rejects every seek, length, position and
    /// write operation.
    /// </summary>
    public abstract class ReadOnlyStream : Stream
    {
        /// <summary>The underlying stream being decoded; set by subclasses.</summary>
        public Stream BaseStream { get; protected set; }

        public override bool CanRead
        {
            get { return BaseStream.CanRead; }
        }

        public override bool CanSeek
        {
            get { return false; }
        }

        public override bool CanWrite
        {
            get { return false; }
        }

        public override long Length
        {
            get { throw new NotSupportedException(); }
        }

        public override long Position
        {
            get { throw new NotSupportedException(); }
            set { throw new NotSupportedException(); }
        }

        public override void Flush() => throw new NotSupportedException();

        public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

        public override void SetLength(long value) => throw new NotSupportedException();

        public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
    }
}
using System.Collections.Generic;
using System.IO;
using System.Linq;
using SharpCompress.Compressors.Xz.Filters;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Streams the decompressed contents of a single XZ block: parses the
    /// block header (flags, optional sizes, filter chain), wires the filters
    /// over the base stream, then serves reads until the block is exhausted,
    /// finally consuming padding and the check field.
    /// </summary>
    internal sealed class XZBlock : XZReadOnlyStream
    {
        /// <summary>Total header size in bytes, decoded per spec as (size byte + 1) * 4.</summary>
        public int BlockHeaderSize => (_blockHeaderSizeByte + 1) * 4;

        /// <summary>Compressed size declared in the header, when present.</summary>
        public ulong? CompressedSize { get; private set; }

        /// <summary>Uncompressed size declared in the header, when present.</summary>
        public ulong? UncompressedSize { get; private set; }

        /// <summary>Filter chain; pushed in header order so popping yields last-to-first.</summary>
        public Stack<BlockFilter> Filters { get; private set; } = new Stack<BlockFilter>();

        public bool HeaderIsLoaded { get; private set; }

        private readonly CheckType _checkType; // declared check kind; not yet used — see CheckCrc
        private readonly int _checkSize;
        private bool _streamConnected;
        private int _numFilters;
        private byte _blockHeaderSizeByte;
        private Stream _decomStream;
        private bool _endOfStream;
        private bool _paddingSkipped;
        private bool _crcChecked;
        private ulong _bytesRead;

        public XZBlock(Stream stream, CheckType checkType, int checkSize) : base(stream)
        {
            _checkType = checkType;
            _checkSize = checkSize;
        }

        public override int Read(byte[] buffer, int offset, int count)
        {
            int bytesRead = 0;
            if (!HeaderIsLoaded)
                LoadHeader();
            if (!_streamConnected)
                ConnectStream();
            if (!_endOfStream)
                bytesRead = _decomStream.Read(buffer, offset, count);
            // A short read signals the end of this block's compressed data.
            if (bytesRead != count)
                _endOfStream = true;
            if (_endOfStream && !_paddingSkipped)
                SkipPadding();
            if (_endOfStream && !_crcChecked)
                CheckCrc();
            _bytesRead += (ulong)bytesRead;
            return bytesRead;
        }

        /// <summary>Consumes the zero padding that aligns the block to a 4-byte boundary.</summary>
        private void SkipPadding()
        {
            int bytes = (int)(BaseStream.Position % 4);
            if (bytes > 0)
            {
                byte[] paddingBytes = new byte[4 - bytes];
                // TODO(review): a short read from BaseStream is not detected here.
                BaseStream.Read(paddingBytes, 0, paddingBytes.Length);
                if (paddingBytes.Any(b => b != 0))
                    throw new InvalidDataException("Padding bytes were non-null");
            }
            _paddingSkipped = true;
        }

        /// <summary>Consumes the check field. The checksum itself is not verified yet.</summary>
        private void CheckCrc()
        {
            byte[] crc = new byte[_checkSize];
            // TODO(review): return value ignored; short reads are not detected.
            BaseStream.Read(crc, 0, _checkSize);
            // Actually do a check (and read in the bytes
            // into the function throughout the stream read).
            _crcChecked = true;
        }

        /// <summary>Stacks the parsed filters on top of the base stream, innermost last.</summary>
        private void ConnectStream()
        {
            _decomStream = BaseStream;
            while (Filters.Any())
            {
                var filter = Filters.Pop();
                filter.SetBaseStream(_decomStream);
                _decomStream = filter;
            }
            _streamConnected = true;
        }

        /// <summary>Reads and parses the full, CRC-verified block header.</summary>
        private void LoadHeader()
        {
            ReadHeaderSize();
            byte[] headerCache = CacheHeader();

            using (var cache = new MemoryStream(headerCache))
            using (var cachedReader = new BinaryReader(cache))
            {
                cachedReader.BaseStream.Position = 1; // skip the header size byte
                ReadBlockFlags(cachedReader);
                ReadFilters(cachedReader);
            }
            HeaderIsLoaded = true;
        }

        /// <summary>
        /// Reads the encoded header-size byte; a zero byte means this is the
        /// index marker, not a block.
        /// </summary>
        private void ReadHeaderSize()
        {
            _blockHeaderSizeByte = (byte)BaseStream.ReadByte();
            if (_blockHeaderSizeByte == 0)
                throw new XZIndexMarkerReachedException();
        }

        /// <summary>Reads the rest of the header, verifies its CRC32, and returns it (sans CRC).</summary>
        private byte[] CacheHeader()
        {
            byte[] blockHeaderWithoutCrc = new byte[BlockHeaderSize - 4];
            blockHeaderWithoutCrc[0] = _blockHeaderSizeByte;
            var read = BaseStream.Read(blockHeaderWithoutCrc, 1, BlockHeaderSize - 5);
            if (read != BlockHeaderSize - 5)
                throw new EndOfStreamException("Reached end of stream unexpectedly");

            uint crc = BaseStream.ReadLittleEndianUInt32();
            uint calcCrc = Crc32.Compute(blockHeaderWithoutCrc);
            if (crc != calcCrc)
                throw new InvalidDataException("Block header corrupt");

            return blockHeaderWithoutCrc;
        }

        /// <summary>Parses the flags byte: filter count, reserved bits, optional size fields.</summary>
        private void ReadBlockFlags(BinaryReader reader)
        {
            var blockFlags = reader.ReadByte();
            _numFilters = (blockFlags & 0x03) + 1;
            byte reserved = (byte)(blockFlags & 0x3C);

            if (reserved != 0)
                throw new InvalidDataException("Reserved bytes used, perhaps an unknown XZ implementation");

            bool compressedSizePresent = (blockFlags & 0x40) != 0;
            bool uncompressedSizePresent = (blockFlags & 0x80) != 0;

            if (compressedSizePresent)
                CompressedSize = reader.ReadXZInteger();
            if (uncompressedSizePresent)
                UncompressedSize = reader.ReadXZInteger();
        }

        /// <summary>
        /// Parses each filter record, validates chain ordering rules, and
        /// checks that the remaining header bytes are zero padding.
        /// </summary>
        private void ReadFilters(BinaryReader reader, long baseStreamOffset = 0)
        {
            int nonLastSizeChangers = 0;
            for (int i = 0; i < _numFilters; i++)
            {
                var filter = BlockFilter.Read(reader);
                if ((i + 1 == _numFilters && !filter.AllowAsLast)
                    || (i + 1 < _numFilters && !filter.AllowAsNonLast))
                    throw new InvalidDataException("Block Filters in bad order");
                if (filter.ChangesDataSize && i + 1 < _numFilters)
                    nonLastSizeChangers++;
                filter.ValidateFilter();
                Filters.Push(filter);
            }
            if (nonLastSizeChangers > 2)
                throw new InvalidDataException("More than two non-last block filters cannot change stream size");

            int blockHeaderPaddingSize = BlockHeaderSize -
                                         (4 + (int)(reader.BaseStream.Position - baseStreamOffset));
            byte[] blockHeaderPadding = reader.ReadBytes(blockHeaderPaddingSize);
            if (!blockHeaderPadding.All(b => b == 0))
                throw new InvalidDataException("Block header contains unknown fields");
        }
    }
}
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Parses the 12-byte XZ stream footer: a CRC32 over the payload, the
    /// backward (index) size, the stream flags, and the trailing "YZ" magic.
    /// </summary>
    public class XZFooter
    {
        private readonly BinaryReader _reader;
        private readonly byte[] _magicBytes = { 0x59, 0x5A };

        public long StreamStartPosition { get; private set; }

        /// <summary>Size of the index field in bytes, decoded from the stored (size/4 - 1) value.</summary>
        public long BackwardSize { get; private set; }

        public byte[] StreamFlags { get; private set; }

        public XZFooter(BinaryReader reader)
        {
            _reader = reader;
            StreamStartPosition = reader.BaseStream.Position;
        }

        /// <summary>Reads and validates a footer from the stream without taking ownership of it.</summary>
        public static XZFooter FromStream(Stream stream)
        {
            var footer = new XZFooter(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
            footer.Process();
            return footer;
        }

        public void Process()
        {
            uint storedCrc = _reader.ReadLittleEndianUInt32();
            byte[] payload = _reader.ReadBytes(6);
            if (storedCrc != Crc32.Compute(payload))
            {
                throw new InvalidDataException("Footer corrupt");
            }
            using (var buffer = new MemoryStream(payload))
            using (var payloadReader = new BinaryReader(buffer))
            {
                BackwardSize = (payloadReader.ReadLittleEndianUInt32() + 1) * 4;
                StreamFlags = payloadReader.ReadBytes(2);
            }
            if (!_reader.ReadBytes(2).SequenceEqual(_magicBytes))
            {
                throw new InvalidDataException("Magic footer missing");
            }
        }
    }
}
using System;
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Parses the XZ stream header: six magic bytes followed by CRC-protected
    /// stream flags that select the integrity-check type used by every block.
    /// </summary>
    public class XZHeader
    {
        private readonly BinaryReader _reader;
        private readonly byte[] MagicHeader = { 0xFD, 0x37, 0x7A, 0x58, 0x5a, 0x00 };

        public CheckType BlockCheckType { get; private set; }

        /// <summary>Check-field size in bytes: 4-byte groups rounded up from the check type id.</summary>
        public int BlockCheckSize => ((((int)BlockCheckType) + 2) / 3) * 4;

        public XZHeader(BinaryReader reader)
        {
            _reader = reader;
        }

        /// <summary>Reads and validates a header from the stream without taking ownership of it.</summary>
        public static XZHeader FromStream(Stream stream)
        {
            var header = new XZHeader(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8));
            header.Process();
            return header;
        }

        public void Process()
        {
            CheckMagicBytes(_reader.ReadBytes(6));
            ProcessStreamFlags();
        }

        private void ProcessStreamFlags()
        {
            byte[] streamFlags = _reader.ReadBytes(2);
            if (_reader.ReadLittleEndianUInt32() != Crc32.Compute(streamFlags))
            {
                throw new InvalidDataException("Stream header corrupt");
            }

            BlockCheckType = (CheckType)(streamFlags[1] & 0x0F);
            // The first flag byte and the high nibble of the second are reserved.
            if ((streamFlags[1] & 0xF0) != 0 || streamFlags[0] != 0)
            {
                throw new InvalidDataException("Unknown XZ Stream Version");
            }
        }

        private void CheckMagicBytes(byte[] header)
        {
            if (!header.SequenceEqual(MagicHeader))
            {
                throw new InvalidDataException("Invalid XZ Stream");
            }
        }
    }
}
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using SharpCompress.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Parses the XZ index: a record count, one (unpadded size, uncompressed
    /// size) record per block, zero padding to a 4-byte boundary, and a CRC32.
    /// </summary>
    [CLSCompliant(false)]
    public class XZIndex
    {
        private readonly BinaryReader _reader;

        public long StreamStartPosition { get; private set; }
        public ulong NumberOfRecords { get; private set; }
        public List<XZIndexRecord> Records { get; } = new List<XZIndexRecord>();

        private readonly bool _indexMarkerAlreadyVerified;

        public XZIndex(BinaryReader reader, bool indexMarkerAlreadyVerified)
        {
            _reader = reader;
            _indexMarkerAlreadyVerified = indexMarkerAlreadyVerified;
            StreamStartPosition = reader.BaseStream.Position;
            // When the caller already consumed the 0x00 marker byte, the index
            // really began one byte earlier.
            if (indexMarkerAlreadyVerified)
            {
                StreamStartPosition--;
            }
        }

        public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
        {
            var index = new XZIndex(new BinaryReader(new NonDisposingStream(stream), Encoding.UTF8), indexMarkerAlreadyVerified);
            index.Process();
            return index;
        }

        public void Process()
        {
            if (!_indexMarkerAlreadyVerified)
            {
                VerifyIndexMarker();
            }
            NumberOfRecords = _reader.ReadXZInteger();
            for (ulong record = 0; record < NumberOfRecords; record++)
            {
                Records.Add(XZIndexRecord.FromBinaryReader(_reader));
            }
            SkipPadding();
            VerifyCrc32();
        }

        private void VerifyIndexMarker()
        {
            if (_reader.ReadByte() != 0)
            {
                throw new InvalidDataException("Not an index block");
            }
        }

        private void SkipPadding()
        {
            int misalignment = (int)(_reader.BaseStream.Position - StreamStartPosition) % 4;
            if (misalignment > 0)
            {
                byte[] padding = _reader.ReadBytes(4 - misalignment);
                if (padding.Any(b => b != 0))
                {
                    throw new InvalidDataException("Padding bytes were non-null");
                }
            }
        }

        private void VerifyCrc32()
        {
            uint crc = _reader.ReadLittleEndianUInt32();
            // TODO verify this matches
        }
    }
}
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// A <see cref="ReadOnlyStream"/> whose wrapped stream is supplied at
    /// construction and must itself be readable.
    /// </summary>
    public abstract class XZReadOnlyStream : ReadOnlyStream
    {
        public XZReadOnlyStream(Stream stream)
        {
            BaseStream = stream;
            if (!BaseStream.CanRead)
            {
                throw new InvalidDataException("Must be able to read from stream");
            }
        }
    }
}
using System;
using System.IO;

namespace SharpCompress.Compressors.Xz
{
    /// <summary>
    /// Read-only decoder stream for a complete XZ container: header, a
    /// sequence of blocks, the index, and the footer.
    /// </summary>
    [CLSCompliant(false)]
    public sealed class XZStream : XZReadOnlyStream
    {
        /// <summary>Probes a stream for a valid XZ header; returns false instead of throwing.</summary>
        public static bool IsXZStream(Stream stream)
        {
            try
            {
                return null != XZHeader.FromStream(stream);
            }
            catch (Exception)
            {
                return false;
            }
        }

        private void AssertBlockCheckTypeIsSupported()
        {
            switch (Header.BlockCheckType)
            {
                case CheckType.NONE:
                    break;
                case CheckType.CRC32:
                    break;
                case CheckType.CRC64:
                    break;
                case CheckType.SHA256:
                    // Recognized by the format but not implemented here yet.
                    throw new NotImplementedException();
                default:
                    throw new NotSupportedException("Check Type unknown to this version of decoder.");
            }
        }

        public XZHeader Header { get; private set; }
        public XZIndex Index { get; private set; }
        public XZFooter Footer { get; private set; }
        public bool HeaderIsRead { get; private set; }
        private XZBlock _currentBlock;

        private bool _endOfStream;

        public XZStream(Stream stream) : base(stream)
        {
        }

        public override int Read(byte[] buffer, int offset, int count)
        {
            if (_endOfStream)
            {
                return 0;
            }
            if (!HeaderIsRead)
            {
                ReadHeader();
            }
            int bytesRead = ReadBlocks(buffer, offset, count);
            // A short read means the final block is exhausted; finish the container.
            if (bytesRead < count)
            {
                _endOfStream = true;
                ReadIndex();
                ReadFooter();
            }
            return bytesRead;
        }

        private void ReadHeader()
        {
            Header = XZHeader.FromStream(BaseStream);
            AssertBlockCheckTypeIsSupported();
            HeaderIsRead = true;
        }

        private void ReadIndex()
        {
            // The index marker byte was already consumed while reading blocks.
            Index = XZIndex.FromStream(BaseStream, true);
            // TODO verify Index
        }

        private void ReadFooter()
        {
            Footer = XZFooter.FromStream(BaseStream);
            // TODO verify footer
        }

        /// <summary>
        /// Fills the buffer from consecutive blocks, advancing to the next
        /// block on each short read, until the index marker is reached.
        /// </summary>
        private int ReadBlocks(byte[] buffer, int offset, int count)
        {
            int total = 0;
            if (_currentBlock == null)
            {
                NextBlock();
            }
            while (true)
            {
                try
                {
                    if (total >= count)
                    {
                        break;
                    }
                    int remaining = count - total;
                    int chunk = _currentBlock.Read(buffer, offset + total, remaining);
                    if (chunk < remaining)
                    {
                        NextBlock();
                    }
                    total += chunk;
                }
                catch (XZIndexMarkerReachedException)
                {
                    break;
                }
            }
            return total;
        }

        private void NextBlock()
        {
            _currentBlock = new XZBlock(BaseStream, Header.BlockCheckType, Header.BlockCheckSize);
        }
    }
}
+// +// TODO: +// Support for "DoubleWordsAreSwapped" for ARM devices +// +// Copyright (C) 2006 Novell, Inc (http://www.novell.com) +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+// + +using System; + +#pragma warning disable 3021 + +namespace SharpCompress.Converters +{ +#if MONO_DATACONVERTER_PUBLIC + public +#endif + + internal unsafe abstract class DataConverter + { + // Disables the warning: CLS compliance checking will not be performed on + // `XXXX' because it is not visible from outside this assembly +#pragma warning disable 3019 + private static readonly DataConverter SwapConv = new SwapConverter(); + + public static readonly bool IsLittleEndian = BitConverter.IsLittleEndian; + + public abstract double GetDouble(byte[] data, int index); + public abstract float GetFloat(byte[] data, int index); + public abstract long GetInt64(byte[] data, int index); + public abstract int GetInt32(byte[] data, int index); + + public abstract short GetInt16(byte[] data, int index); + + [CLSCompliant(false)] + public abstract uint GetUInt32(byte[] data, int index); + + [CLSCompliant(false)] + public abstract ushort GetUInt16(byte[] data, int index); + + [CLSCompliant(false)] + public abstract ulong GetUInt64(byte[] data, int index); + + public abstract void PutBytes(byte[] dest, int destIdx, double value); + public abstract void PutBytes(byte[] dest, int destIdx, float value); + public abstract void PutBytes(byte[] dest, int destIdx, int value); + public abstract void PutBytes(byte[] dest, int destIdx, long value); + public abstract void PutBytes(byte[] dest, int destIdx, short value); + + [CLSCompliant(false)] + public abstract void PutBytes(byte[] dest, int destIdx, ushort value); + + [CLSCompliant(false)] + public abstract void PutBytes(byte[] dest, int destIdx, uint value); + + [CLSCompliant(false)] + public abstract void PutBytes(byte[] dest, int destIdx, ulong value); + + public byte[] GetBytes(double value) + { + byte[] ret = new byte[8]; + PutBytes(ret, 0, value); + return ret; + } + + public byte[] GetBytes(float value) + { + byte[] ret = new byte[4]; + PutBytes(ret, 0, value); + return ret; + } + + public byte[] GetBytes(int value) + { + 
byte[] ret = new byte[4]; + PutBytes(ret, 0, value); + return ret; + } + + public byte[] GetBytes(long value) + { + byte[] ret = new byte[8]; + PutBytes(ret, 0, value); + return ret; + } + + public byte[] GetBytes(short value) + { + byte[] ret = new byte[2]; + PutBytes(ret, 0, value); + return ret; + } + + [CLSCompliant(false)] + public byte[] GetBytes(ushort value) + { + byte[] ret = new byte[2]; + PutBytes(ret, 0, value); + return ret; + } + + [CLSCompliant(false)] + public byte[] GetBytes(uint value) + { + byte[] ret = new byte[4]; + PutBytes(ret, 0, value); + return ret; + } + + [CLSCompliant(false)] + public byte[] GetBytes(ulong value) + { + byte[] ret = new byte[8]; + PutBytes(ret, 0, value); + return ret; + } + + static public DataConverter LittleEndian => BitConverter.IsLittleEndian ? Native : SwapConv; + + static public DataConverter BigEndian => BitConverter.IsLittleEndian ? SwapConv : Native; + + static public DataConverter Native { get; } = new CopyConverter(); + + internal void Check(byte[] dest, int destIdx, int size) + { + if (dest == null) + { + throw new ArgumentNullException(nameof(dest)); + } + if (destIdx < 0 || destIdx > dest.Length - size) + { + throw new ArgumentException("destIdx"); + } + } + + private class CopyConverter : DataConverter + { + public override double GetDouble(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 8) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + double ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 8; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override ulong GetUInt64(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 8) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + ulong ret; + 
byte* b = (byte*)&ret; + + for (int i = 0; i < 8; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override long GetInt64(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 8) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + long ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 8; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override float GetFloat(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 4) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + float ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 4; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override int GetInt32(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 4) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + int ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 4; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override uint GetUInt32(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 4) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + uint ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 4; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override short GetInt16(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 2) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new 
ArgumentException("index"); + } + + short ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 2; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override ushort GetUInt16(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 2) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + ushort ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 2; i++) + { + b[i] = data[index + i]; + } + + return ret; + } + + public override void PutBytes(byte[] dest, int destIdx, double value) + { + Check(dest, destIdx, 8); + fixed (byte* target = &dest[destIdx]) + { + long* source = (long*)&value; + + *((long*)target) = *source; + } + } + + public override void PutBytes(byte[] dest, int destIdx, float value) + { + Check(dest, destIdx, 4); + fixed (byte* target = &dest[destIdx]) + { + uint* source = (uint*)&value; + + *((uint*)target) = *source; + } + } + + public override void PutBytes(byte[] dest, int destIdx, int value) + { + Check(dest, destIdx, 4); + fixed (byte* target = &dest[destIdx]) + { + uint* source = (uint*)&value; + + *((uint*)target) = *source; + } + } + + public override void PutBytes(byte[] dest, int destIdx, uint value) + { + Check(dest, destIdx, 4); + fixed (byte* target = &dest[destIdx]) + { + uint* source = &value; + + *((uint*)target) = *source; + } + } + + public override void PutBytes(byte[] dest, int destIdx, long value) + { + Check(dest, destIdx, 8); + fixed (byte* target = &dest[destIdx]) + { + long* source = &value; + + *((long*)target) = *source; + } + } + + public override void PutBytes(byte[] dest, int destIdx, ulong value) + { + Check(dest, destIdx, 8); + fixed (byte* target = &dest[destIdx]) + { + ulong* source = &value; + + *((ulong*)target) = *source; + } + } + + public override void PutBytes(byte[] dest, int destIdx, short value) + { + Check(dest, destIdx, 2); + fixed (byte* target = 
&dest[destIdx]) + { + ushort* source = (ushort*)&value; + + *((ushort*)target) = *source; + } + } + + public override void PutBytes(byte[] dest, int destIdx, ushort value) + { + Check(dest, destIdx, 2); + fixed (byte* target = &dest[destIdx]) + { + ushort* source = &value; + + *((ushort*)target) = *source; + } + } + } + + private class SwapConverter : DataConverter + { + public override double GetDouble(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 8) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + double ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 8; i++) + { + b[7 - i] = data[index + i]; + } + + return ret; + } + + public override ulong GetUInt64(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 8) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + ulong ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 8; i++) + { + b[7 - i] = data[index + i]; + } + + return ret; + } + + public override long GetInt64(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 8) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + long ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 8; i++) + { + b[7 - i] = data[index + i]; + } + + return ret; + } + + public override float GetFloat(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 4) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + float ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 4; i++) + { + b[3 - i] = data[index + 
i]; + } + + return ret; + } + + public override int GetInt32(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 4) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + int ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 4; i++) + { + b[3 - i] = data[index + i]; + } + + return ret; + } + + public override uint GetUInt32(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 4) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + uint ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 4; i++) + { + b[3 - i] = data[index + i]; + } + + return ret; + } + + public override short GetInt16(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 2) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + short ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 2; i++) + { + b[1 - i] = data[index + i]; + } + + return ret; + } + + public override ushort GetUInt16(byte[] data, int index) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + if (data.Length - index < 2) + { + throw new ArgumentException("index"); + } + if (index < 0) + { + throw new ArgumentException("index"); + } + + ushort ret; + byte* b = (byte*)&ret; + + for (int i = 0; i < 2; i++) + { + b[1 - i] = data[index + i]; + } + + return ret; + } + + public override void PutBytes(byte[] dest, int destIdx, double value) + { + Check(dest, destIdx, 8); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 8; i++) + { + target[i] = source[7 - i]; + } + } + } + + public override void PutBytes(byte[] dest, int 
destIdx, float value) + { + Check(dest, destIdx, 4); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 4; i++) + { + target[i] = source[3 - i]; + } + } + } + + public override void PutBytes(byte[] dest, int destIdx, int value) + { + Check(dest, destIdx, 4); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 4; i++) + { + target[i] = source[3 - i]; + } + } + } + + public override void PutBytes(byte[] dest, int destIdx, uint value) + { + Check(dest, destIdx, 4); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 4; i++) + { + target[i] = source[3 - i]; + } + } + } + + public override void PutBytes(byte[] dest, int destIdx, long value) + { + Check(dest, destIdx, 8); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 8; i++) + { + target[i] = source[7 - i]; + } + } + } + + public override void PutBytes(byte[] dest, int destIdx, ulong value) + { + Check(dest, destIdx, 8); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 8; i++) + { + target[i] = source[7 - i]; + } + } + } + + public override void PutBytes(byte[] dest, int destIdx, short value) + { + Check(dest, destIdx, 2); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 2; i++) + { + target[i] = source[1 - i]; + } + } + } + + public override void PutBytes(byte[] dest, int destIdx, ushort value) + { + Check(dest, destIdx, 2); + + fixed (byte* target = &dest[destIdx]) + { + byte* source = (byte*)&value; + + for (int i = 0; i < 2; i++) + { + target[i] = source[1 - i]; + } + } + } + } + +#if MONO_DATACONVERTER_STATIC_METHODS + static unsafe void PutBytesLE (byte *dest, byte *src, int count) + { + int i = 0; + + if (BitConverter.IsLittleEndian){ + for (; i < count; i++) + *dest++ = *src++; + } else { + dest += count; 
+ for (; i < count; i++) + *(--dest) = *src++; + } + } + + static unsafe void PutBytesBE (byte *dest, byte *src, int count) + { + int i = 0; + + if (BitConverter.IsLittleEndian){ + dest += count; + for (; i < count; i++) + *(--dest) = *src++; + } else { + for (; i < count; i++) + *dest++ = *src++; + } + } + + static unsafe void PutBytesNative (byte *dest, byte *src, int count) + { + int i = 0; + + for (; i < count; i++) + dest [i-count] = *src++; + } + + static public unsafe double DoubleFromLE (byte[] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + double ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe float FloatFromLE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + float ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe long Int64FromLE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + long ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe ulong UInt64FromLE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + ulong ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe int 
Int32FromLE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + int ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe uint UInt32FromLE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + uint ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe short Int16FromLE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 2) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + short ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 2); + } + return ret; + } + + static public unsafe ushort UInt16FromLE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 2) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + ushort ret; + fixed (byte *src = &data[index]){ + PutBytesLE ((byte *) &ret, src, 2); + } + return ret; + } + + static public unsafe double DoubleFromBE (byte[] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + double ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe float FloatFromBE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + 
if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + float ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe long Int64FromBE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + long ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe ulong UInt64FromBE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + ulong ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe int Int32FromBE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + int ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe uint UInt32FromBE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + uint ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe short Int16FromBE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 2) + throw new ArgumentException ("index"); + if (index < 0) + throw new 
ArgumentException ("index"); + + short ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 2); + } + return ret; + } + + static public unsafe ushort UInt16FromBE (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 2) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + ushort ret; + fixed (byte *src = &data[index]){ + PutBytesBE ((byte *) &ret, src, 2); + } + return ret; + } + + static public unsafe double DoubleFromNative (byte[] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + double ret; + fixed (byte *src = &data[index]){ + PutBytesNative ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe float FloatFromNative (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + float ret; + fixed (byte *src = &data[index]){ + PutBytesNative ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe long Int64FromNative (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + long ret; + fixed (byte *src = &data[index]){ + PutBytesNative ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe ulong UInt64FromNative (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 8) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + ulong ret; + fixed (byte *src = 
&data[index]){ + PutBytesNative ((byte *) &ret, src, 8); + } + return ret; + } + + static public unsafe int Int32FromNative (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + int ret; + fixed (byte *src = &data[index]){ + PutBytesNative ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe uint UInt32FromNative (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 4) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + uint ret; + fixed (byte *src = &data[index]){ + PutBytesNative ((byte *) &ret, src, 4); + } + return ret; + } + + static public unsafe short Int16FromNative (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 2) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + short ret; + fixed (byte *src = &data[index]){ + PutBytesNative ((byte *) &ret, src, 2); + } + return ret; + } + + static public unsafe ushort UInt16FromNative (byte [] data, int index) + { + if (data == null) + throw new ArgumentNullException ("data"); + if (data.Length - index < 2) + throw new ArgumentException ("index"); + if (index < 0) + throw new ArgumentException ("index"); + + ushort ret; + fixed (byte *src = &data[index]){ + PutBytesNative ((byte *) &ret, src, 2); + } + return ret; + } + + unsafe static byte[] GetBytesPtr (byte *ptr, int count) + { + byte [] ret = new byte [count]; + + for (int i = 0; i < count; i++) { + ret [i] = ptr [i]; + } + + return ret; + } + + unsafe static byte[] GetBytesSwap (bool swap, byte *ptr, int count) + { + byte [] ret = new byte [count]; + + if (swap){ + int t = count-1; + for (int i = 0; i < count; i++) { + ret [t-i] = ptr 
[i]; + } + } else { + for (int i = 0; i < count; i++) { + ret [i] = ptr [i]; + } + } + return ret; + } + + unsafe public static byte[] GetBytesNative (bool value) + { + return GetBytesPtr ((byte *) &value, 1); + } + + unsafe public static byte[] GetBytesNative (char value) + { + return GetBytesPtr ((byte *) &value, 2); + } + + unsafe public static byte[] GetBytesNative (short value) + { + return GetBytesPtr ((byte *) &value, 2); + } + + unsafe public static byte[] GetBytesNative (int value) + { + return GetBytesPtr ((byte *) &value, 4); + } + + unsafe public static byte[] GetBytesNative (long value) + { + return GetBytesPtr ((byte *) &value, 8); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesNative (ushort value) + { + return GetBytesPtr ((byte *) &value, 2); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesNative (uint value) + { + return GetBytesPtr ((byte *) &value, 4); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesNative (ulong value) + { + return GetBytesPtr ((byte *) &value, 8); + } + + unsafe public static byte[] GetBytesNative (float value) + { + return GetBytesPtr ((byte *) &value, 4); + } + + unsafe public static byte[] GetBytesNative (double value) + { + return GetBytesPtr ((byte *) &value, 8); + } + + unsafe public static byte[] GetBytesLE (bool value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 1); + } + + unsafe public static byte[] GetBytesLE (char value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 2); + } + + unsafe public static byte[] GetBytesLE (short value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 2); + } + + unsafe public static byte[] GetBytesLE (int value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 4); + } + + unsafe public static byte[] GetBytesLE (long value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 8); + } + + 
[CLSCompliant (false)] + unsafe public static byte[] GetBytesLE (ushort value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 2); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesLE (uint value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 4); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesLE (ulong value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 8); + } + + unsafe public static byte[] GetBytesLE (float value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 4); + } + + unsafe public static byte[] GetBytesLE (double value) + { + return GetBytesSwap (!BitConverter.IsLittleEndian, (byte *) &value, 8); + } + + unsafe public static byte[] GetBytesBE (bool value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 1); + } + + unsafe public static byte[] GetBytesBE (char value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 2); + } + + unsafe public static byte[] GetBytesBE (short value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 2); + } + + unsafe public static byte[] GetBytesBE (int value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 4); + } + + unsafe public static byte[] GetBytesBE (long value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 8); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesBE (ushort value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 2); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesBE (uint value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 4); + } + + [CLSCompliant (false)] + unsafe public static byte[] GetBytesBE (ulong value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 8); + } + + unsafe public static byte[] GetBytesBE (float value) + 
{ + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 4); + } + + unsafe public static byte[] GetBytesBE (double value) + { + return GetBytesSwap (BitConverter.IsLittleEndian, (byte *) &value, 8); + } +#endif + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Crypto/Crc32Stream.cs b/BizHawk.Client.Common/SharpCompress/Crypto/Crc32Stream.cs new file mode 100644 index 0000000000..35dd85bdb6 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Crypto/Crc32Stream.cs @@ -0,0 +1,119 @@ +using System; +using System.Collections.Generic; +using System.IO; + +namespace SharpCompress.Crypto +{ + internal sealed class Crc32Stream : Stream + { + public const uint DefaultPolynomial = 0xedb88320u; + public const uint DefaultSeed = 0xffffffffu; + + private static uint[] defaultTable; + + private readonly uint[] table; + private uint hash; + + private readonly Stream stream; + + public Crc32Stream(Stream stream) + : this(stream, DefaultPolynomial, DefaultSeed) + { + } + + public Crc32Stream(Stream stream, uint polynomial, uint seed) + { + this.stream = stream; + table = InitializeTable(polynomial); + hash = seed; + } + + public Stream WrappedStream => stream; + + public override void Flush() + { + stream.Flush(); + } + + public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + + public override void SetLength(long value) => throw new NotSupportedException(); + + public override void Write(byte[] buffer, int offset, int count) + { + stream.Write(buffer, offset, count); + hash = CalculateCrc(table, hash, buffer, offset, count); + } + + public override void WriteByte(byte value) + { + stream.WriteByte(value); + hash = CalculateCrc(table, hash, value); + } + + public override bool CanRead => stream.CanRead; + public override bool CanSeek => false; + public override bool CanWrite => 
stream.CanWrite; + public override long Length => throw new NotSupportedException(); + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public uint Crc => ~hash; + + public static uint Compute(byte[] buffer) + { + return Compute(DefaultSeed, buffer); + } + + public static uint Compute(uint seed, byte[] buffer) + { + return Compute(DefaultPolynomial, seed, buffer); + } + + public static uint Compute(uint polynomial, uint seed, byte[] buffer) + { + return ~CalculateCrc(InitializeTable(polynomial), seed, buffer, 0, buffer.Length); + } + + private static uint[] InitializeTable(uint polynomial) + { + if (polynomial == DefaultPolynomial && defaultTable != null) + return defaultTable; + + var createTable = new uint[256]; + for (var i = 0; i < 256; i++) + { + var entry = (uint)i; + for (var j = 0; j < 8; j++) + if ((entry & 1) == 1) + entry = (entry >> 1) ^ polynomial; + else + entry = entry >> 1; + createTable[i] = entry; + } + + if (polynomial == DefaultPolynomial) + defaultTable = createTable; + + return createTable; + } + + private static uint CalculateCrc(uint[] table, uint crc, byte[] buffer, int offset, int count) + { + unchecked + { + for (int i = offset, end = offset + count; i < end; i++) + { + crc = CalculateCrc(table, crc, buffer[i]); + } + } + return crc; + } + + private static uint CalculateCrc(uint[] table, uint crc, byte b) + { + return (crc >> 8) ^ table[(crc ^ b) & 0xFF]; + } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Crypto/CryptoException.cs b/BizHawk.Client.Common/SharpCompress/Crypto/CryptoException.cs new file mode 100644 index 0000000000..6e8b9f1416 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Crypto/CryptoException.cs @@ -0,0 +1,25 @@ +using System; + +namespace SharpCompress.Crypto +{ + public class CryptoException + : Exception + { + public CryptoException() + { + } + + public CryptoException( + string message) + : base(message) + { + } + + public 
CryptoException( + string message, + Exception exception) + : base(message, exception) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Crypto/DataLengthException.cs b/BizHawk.Client.Common/SharpCompress/Crypto/DataLengthException.cs new file mode 100644 index 0000000000..b82a9e9c98 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Crypto/DataLengthException.cs @@ -0,0 +1,35 @@ +using System; + +namespace SharpCompress.Crypto +{ + public class DataLengthException + : CryptoException + { + /** + * base constructor. + */ + + public DataLengthException() + { + } + + /** + * create a DataLengthException with the given message. + * + * @param message the message to be carried with the exception. + */ + + public DataLengthException( + string message) + : base(message) + { + } + + public DataLengthException( + string message, + Exception exception) + : base(message, exception) + { + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Crypto/IBlockCipher.cs b/BizHawk.Client.Common/SharpCompress/Crypto/IBlockCipher.cs new file mode 100644 index 0000000000..6f6542e309 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Crypto/IBlockCipher.cs @@ -0,0 +1,34 @@ +namespace SharpCompress.Crypto +{ + /// Base interface for a symmetric key block cipher. + public interface IBlockCipher + { + /// The name of the algorithm this cipher implements. + string AlgorithmName { get; } + + /// Initialise the cipher. + /// Initialise for encryption if true, for decryption if false. + /// The key or other data required by the cipher. + void Init(bool forEncryption, ICipherParameters parameters); + + /// The block size for this cipher, in bytes. + int GetBlockSize(); + + /// Indicates whether this cipher can handle partial blocks. + bool IsPartialBlockOkay { get; } + + /// Process a block. + /// The input buffer. + /// The offset into inBuf that the input block begins. + /// The output buffer. 
+ /// The offset into outBuf to write the output block. + /// If input block is wrong size, or outBuf too small. + /// The number of bytes processed and produced. + int ProcessBlock(byte[] inBuf, int inOff, byte[] outBuf, int outOff); + + /// + /// Reset the cipher to the same state as it was after the last init (if there was one). + /// + void Reset(); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Crypto/ICipherParameters.cs b/BizHawk.Client.Common/SharpCompress/Crypto/ICipherParameters.cs new file mode 100644 index 0000000000..d6e4af8b8c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Crypto/ICipherParameters.cs @@ -0,0 +1,6 @@ +namespace SharpCompress.Crypto +{ + public interface ICipherParameters + { + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Crypto/KeyParameter.cs b/BizHawk.Client.Common/SharpCompress/Crypto/KeyParameter.cs new file mode 100644 index 0000000000..8658fc9c36 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Crypto/KeyParameter.cs @@ -0,0 +1,48 @@ +using System; + +namespace SharpCompress.Crypto +{ + public class KeyParameter + : ICipherParameters + { + private readonly byte[] key; + + public KeyParameter( + byte[] key) + { + if (key == null) + { + throw new ArgumentNullException(nameof(key)); + } + + this.key = (byte[])key.Clone(); + } + + public KeyParameter( + byte[] key, + int keyOff, + int keyLen) + { + if (key == null) + { + throw new ArgumentNullException(nameof(key)); + } + if (keyOff < 0 || keyOff > key.Length) + { + throw new ArgumentOutOfRangeException(nameof(keyOff)); + } + if (keyLen < 0 || (keyOff + keyLen) > key.Length) + { + throw new ArgumentOutOfRangeException(nameof(keyLen)); + } + + this.key = new byte[keyLen]; + Array.Copy(key, keyOff, this.key, 0, keyLen); + } + + public byte[] GetKey() + { + return (byte[])key.Clone(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Crypto/RijndaelEngine.cs 
b/BizHawk.Client.Common/SharpCompress/Crypto/RijndaelEngine.cs new file mode 100644 index 0000000000..06e2bbddbc --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Crypto/RijndaelEngine.cs @@ -0,0 +1,731 @@ +using System; + +namespace SharpCompress.Crypto +{ + public class RijndaelEngine + : IBlockCipher + { + private static readonly int MAXROUNDS = 14; + + private static readonly int MAXKC = (256 / 4); + + private static readonly byte[] Logtable = + { + 0, 0, 25, 1, 50, 2, 26, 198, + 75, 199, 27, 104, 51, 238, 223, 3, + 100, 4, 224, 14, 52, 141, 129, 239, + 76, 113, 8, 200, 248, 105, 28, 193, + 125, 194, 29, 181, 249, 185, 39, 106, + 77, 228, 166, 114, 154, 201, 9, 120, + 101, 47, 138, 5, 33, 15, 225, 36, + 18, 240, 130, 69, 53, 147, 218, 142, + 150, 143, 219, 189, 54, 208, 206, 148, + 19, 92, 210, 241, 64, 70, 131, 56, + 102, 221, 253, 48, 191, 6, 139, 98, + 179, 37, 226, 152, 34, 136, 145, 16, + 126, 110, 72, 195, 163, 182, 30, 66, + 58, 107, 40, 84, 250, 133, 61, 186, + 43, 121, 10, 21, 155, 159, 94, 202, + 78, 212, 172, 229, 243, 115, 167, 87, + 175, 88, 168, 80, 244, 234, 214, 116, + 79, 174, 233, 213, 231, 230, 173, 232, + 44, 215, 117, 122, 235, 22, 11, 245, + 89, 203, 95, 176, 156, 169, 81, 160, + 127, 12, 246, 111, 23, 196, 73, 236, + 216, 67, 31, 45, 164, 118, 123, 183, + 204, 187, 62, 90, 251, 96, 177, 134, + 59, 82, 161, 108, 170, 85, 41, 157, + 151, 178, 135, 144, 97, 190, 220, 252, + 188, 149, 207, 205, 55, 63, 91, 209, + 83, 57, 132, 60, 65, 162, 109, 71, + 20, 42, 158, 93, 86, 242, 211, 171, + 68, 17, 146, 217, 35, 32, 46, 137, + 180, 124, 184, 38, 119, 153, 227, 165, + 103, 74, 237, 222, 197, 49, 254, 24, + 13, 99, 140, 128, 192, 247, 112, 7 + }; + + private static readonly byte[] Alogtable = + { + 0, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, + 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170, + 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49, + 83, 245, 4, 12, 20, 60, 68, 204, 
79, 209, 104, 184, 211, 110, 178, 205, + 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, + 131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, + 181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163, + 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160, + 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, + 195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, + 159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, + 155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84, + 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202, + 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, + 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, + 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1, + 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, + 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170, + 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49, + 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205, + 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, + 131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, + 181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163, + 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160, + 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, + 195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, + 159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, + 155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84, + 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202, + 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, 
+ 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, + 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1 + }; + + private static readonly byte[] S = + { + 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118, + 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192, + 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21, + 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117, + 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, + 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, + 208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, + 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210, + 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115, + 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219, + 224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121, + 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, + 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, + 112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, + 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, + 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22 + }; + + private static readonly byte[] Si = + { + 82, 9, 106, 213, 48, 54, 165, 56, 191, 64, 163, 158, 129, 243, 215, 251, + 124, 227, 57, 130, 155, 47, 255, 135, 52, 142, 67, 68, 196, 222, 233, 203, + 84, 123, 148, 50, 166, 194, 35, 61, 238, 76, 149, 11, 66, 250, 195, 78, + 8, 46, 161, 102, 40, 217, 36, 178, 118, 91, 162, 73, 109, 139, 209, 37, + 114, 248, 246, 100, 134, 104, 152, 22, 212, 164, 92, 204, 93, 101, 182, 146, + 108, 112, 72, 80, 253, 237, 185, 218, 94, 21, 70, 87, 167, 141, 157, 132, + 144, 216, 171, 0, 140, 188, 211, 10, 247, 228, 88, 5, 184, 179, 69, 6, + 208, 44, 30, 143, 
202, 63, 15, 2, 193, 175, 189, 3, 1, 19, 138, 107, + 58, 145, 17, 65, 79, 103, 220, 234, 151, 242, 207, 206, 240, 180, 230, 115, + 150, 172, 116, 34, 231, 173, 53, 133, 226, 249, 55, 232, 28, 117, 223, 110, + 71, 241, 26, 113, 29, 41, 197, 137, 111, 183, 98, 14, 170, 24, 190, 27, + 252, 86, 62, 75, 198, 210, 121, 32, 154, 219, 192, 254, 120, 205, 90, 244, + 31, 221, 168, 51, 136, 7, 199, 49, 177, 18, 16, 89, 39, 128, 236, 95, + 96, 81, 127, 169, 25, 181, 74, 13, 45, 229, 122, 159, 147, 201, 156, 239, + 160, 224, 59, 77, 174, 42, 245, 176, 200, 235, 187, 60, 131, 83, 153, 97, + 23, 43, 4, 126, 186, 119, 214, 38, 225, 105, 20, 99, 85, 33, 12, 125 + }; + + private static readonly byte[] rcon = + { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, + 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91 + }; + + private static readonly byte[][] shifts0 = + { + new byte[] {0, 8, 16, 24}, + new byte[] {0, 8, 16, 24}, + new byte[] {0, 8, 16, 24}, + new byte[] {0, 8, 16, 32}, + new byte[] {0, 8, 24, 32} + }; + + private static readonly byte[][] shifts1 = + { + new byte[] {0, 24, 16, 8}, + new byte[] {0, 32, 24, 16}, + new byte[] {0, 40, 32, 24}, + new byte[] {0, 48, 40, 24}, + new byte[] {0, 56, 40, 32} + }; + + /** + * multiply two elements of GF(2^m) + * needed for MixColumn and InvMixColumn + */ + + private byte Mul0x2( + int b) + { + if (b != 0) + { + return Alogtable[25 + (Logtable[b] & 0xff)]; + } + return 0; + } + + private byte Mul0x3( + int b) + { + if (b != 0) + { + return Alogtable[1 + (Logtable[b] & 0xff)]; + } + return 0; + } + + private byte Mul0x9( + int b) + { + if (b >= 0) + { + return Alogtable[199 + b]; + } + return 0; + } + + private byte Mul0xb( + int b) + { + if (b >= 0) + { + return Alogtable[104 + b]; + } + return 0; + } + + private byte Mul0xd( + int b) + { + if (b >= 0) + { + return Alogtable[238 + b]; + } + return 0; + } + + private byte Mul0xe( + int b) + { + if (b >= 0) 
+ { + return Alogtable[223 + b]; + } + return 0; + } + + /** + * xor corresponding text input and round key input bytes + */ + + private void KeyAddition( + long[] rk) + { + A0 ^= rk[0]; + A1 ^= rk[1]; + A2 ^= rk[2]; + A3 ^= rk[3]; + } + + private long Shift( + long r, + int shift) + { + //return (((long)((ulong) r >> shift) | (r << (BC - shift)))) & BC_MASK; + + ulong temp = (ulong)r >> shift; + + // NB: This corrects for Mono Bug #79087 (fixed in 1.1.17) + if (shift > 31) + { + temp &= 0xFFFFFFFFUL; + } + + return ((long)temp | (r << (BC - shift))) & BC_MASK; + } + + /** + * Row 0 remains unchanged + * The other three rows are shifted a variable amount + */ + + private void ShiftRow( + byte[] shiftsSC) + { + A1 = Shift(A1, shiftsSC[1]); + A2 = Shift(A2, shiftsSC[2]); + A3 = Shift(A3, shiftsSC[3]); + } + + private long ApplyS( + long r, + byte[] box) + { + long res = 0; + + for (int j = 0; j < BC; j += 8) + { + res |= (long)(box[(int)((r >> j) & 0xff)] & 0xff) << j; + } + + return res; + } + + /** + * Replace every byte of the input by the byte at that place + * in the nonlinear S-box + */ + + private void Substitution( + byte[] box) + { + A0 = ApplyS(A0, box); + A1 = ApplyS(A1, box); + A2 = ApplyS(A2, box); + A3 = ApplyS(A3, box); + } + + /** + * Mix the bytes of every column in a linear way + */ + + private void MixColumn() + { + long r0, r1, r2, r3; + + r0 = r1 = r2 = r3 = 0; + + for (int j = 0; j < BC; j += 8) + { + int a0 = (int)((A0 >> j) & 0xff); + int a1 = (int)((A1 >> j) & 0xff); + int a2 = (int)((A2 >> j) & 0xff); + int a3 = (int)((A3 >> j) & 0xff); + + r0 |= (long)((Mul0x2(a0) ^ Mul0x3(a1) ^ a2 ^ a3) & 0xff) << j; + + r1 |= (long)((Mul0x2(a1) ^ Mul0x3(a2) ^ a3 ^ a0) & 0xff) << j; + + r2 |= (long)((Mul0x2(a2) ^ Mul0x3(a3) ^ a0 ^ a1) & 0xff) << j; + + r3 |= (long)((Mul0x2(a3) ^ Mul0x3(a0) ^ a1 ^ a2) & 0xff) << j; + } + + A0 = r0; + A1 = r1; + A2 = r2; + A3 = r3; + } + + /** + * Mix the bytes of every column in a linear way + * This is the opposite 
operation of Mixcolumn + */ + + private void InvMixColumn() + { + long r0, r1, r2, r3; + + r0 = r1 = r2 = r3 = 0; + for (int j = 0; j < BC; j += 8) + { + int a0 = (int)((A0 >> j) & 0xff); + int a1 = (int)((A1 >> j) & 0xff); + int a2 = (int)((A2 >> j) & 0xff); + int a3 = (int)((A3 >> j) & 0xff); + + // + // pre-lookup the log table + // + a0 = (a0 != 0) ? (Logtable[a0 & 0xff] & 0xff) : -1; + a1 = (a1 != 0) ? (Logtable[a1 & 0xff] & 0xff) : -1; + a2 = (a2 != 0) ? (Logtable[a2 & 0xff] & 0xff) : -1; + a3 = (a3 != 0) ? (Logtable[a3 & 0xff] & 0xff) : -1; + + r0 |= (long)((Mul0xe(a0) ^ Mul0xb(a1) ^ Mul0xd(a2) ^ Mul0x9(a3)) & 0xff) << j; + + r1 |= (long)((Mul0xe(a1) ^ Mul0xb(a2) ^ Mul0xd(a3) ^ Mul0x9(a0)) & 0xff) << j; + + r2 |= (long)((Mul0xe(a2) ^ Mul0xb(a3) ^ Mul0xd(a0) ^ Mul0x9(a1)) & 0xff) << j; + + r3 |= (long)((Mul0xe(a3) ^ Mul0xb(a0) ^ Mul0xd(a1) ^ Mul0x9(a2)) & 0xff) << j; + } + + A0 = r0; + A1 = r1; + A2 = r2; + A3 = r3; + } + + /** + * Calculate the necessary round keys + * The number of calculations depends on keyBits and blockBits + */ + + private long[][] GenerateWorkingKey( + byte[] key) + { + int KC; + int t, rconpointer = 0; + int keyBits = key.Length * 8; + byte[,] tk = new byte[4, MAXKC]; + + //long[,] W = new long[MAXROUNDS+1,4]; + long[][] W = new long[MAXROUNDS + 1][]; + + for (int i = 0; i < MAXROUNDS + 1; i++) + { + W[i] = new long[4]; + } + + switch (keyBits) + { + case 128: + KC = 4; + break; + case 160: + KC = 5; + break; + case 192: + KC = 6; + break; + case 224: + KC = 7; + break; + case 256: + KC = 8; + break; + default: + throw new ArgumentException("Key length not 128/160/192/224/256 bits."); + } + + if (keyBits >= blockBits) + { + ROUNDS = KC + 6; + } + else + { + ROUNDS = (BC / 8) + 6; + } + + // + // copy the key into the processing area + // + int index = 0; + + for (int i = 0; i < key.Length; i++) + { + tk[i % 4, i / 4] = key[index++]; + } + + t = 0; + + // + // copy values into round key array + // + for (int j = 0; (j < KC) && (t < 
(ROUNDS + 1) * (BC / 8)); j++, t++) + { + for (int i = 0; i < 4; i++) + { + W[t / (BC / 8)][i] |= (long)(tk[i, j] & 0xff) << ((t * 8) % BC); + } + } + + // + // while not enough round key material calculated + // calculate new values + // + while (t < (ROUNDS + 1) * (BC / 8)) + { + for (int i = 0; i < 4; i++) + { + tk[i, 0] ^= S[tk[(i + 1) % 4, KC - 1] & 0xff]; + } + tk[0, 0] ^= rcon[rconpointer++]; + + if (KC <= 6) + { + for (int j = 1; j < KC; j++) + { + for (int i = 0; i < 4; i++) + { + tk[i, j] ^= tk[i, j - 1]; + } + } + } + else + { + for (int j = 1; j < 4; j++) + { + for (int i = 0; i < 4; i++) + { + tk[i, j] ^= tk[i, j - 1]; + } + } + for (int i = 0; i < 4; i++) + { + tk[i, 4] ^= S[tk[i, 3] & 0xff]; + } + for (int j = 5; j < KC; j++) + { + for (int i = 0; i < 4; i++) + { + tk[i, j] ^= tk[i, j - 1]; + } + } + } + + // + // copy values into round key array + // + for (int j = 0; (j < KC) && (t < (ROUNDS + 1) * (BC / 8)); j++, t++) + { + for (int i = 0; i < 4; i++) + { + W[t / (BC / 8)][i] |= (long)(tk[i, j] & 0xff) << ((t * 8) % (BC)); + } + } + } + return W; + } + + private readonly int BC; + private readonly long BC_MASK; + private int ROUNDS; + private readonly int blockBits; + private long[][] workingKey; + private long A0, A1, A2, A3; + private bool forEncryption; + private readonly byte[] shifts0SC; + private readonly byte[] shifts1SC; + + /** + * default constructor - 128 bit block size. + */ + + public RijndaelEngine() + : this(128) + { + } + + /** + * basic constructor - set the cipher up for a given blocksize + * + * @param blocksize the blocksize in bits, must be 128, 192, or 256. 
+ */ + + public RijndaelEngine( + int blockBits) + { + switch (blockBits) + { + case 128: + BC = 32; + BC_MASK = 0xffffffffL; + shifts0SC = shifts0[0]; + shifts1SC = shifts1[0]; + break; + case 160: + BC = 40; + BC_MASK = 0xffffffffffL; + shifts0SC = shifts0[1]; + shifts1SC = shifts1[1]; + break; + case 192: + BC = 48; + BC_MASK = 0xffffffffffffL; + shifts0SC = shifts0[2]; + shifts1SC = shifts1[2]; + break; + case 224: + BC = 56; + BC_MASK = 0xffffffffffffffL; + shifts0SC = shifts0[3]; + shifts1SC = shifts1[3]; + break; + case 256: + BC = 64; + BC_MASK = unchecked((long)0xffffffffffffffffL); + shifts0SC = shifts0[4]; + shifts1SC = shifts1[4]; + break; + default: + throw new ArgumentException("unknown blocksize to Rijndael"); + } + + this.blockBits = blockBits; + } + + /** + * initialise a Rijndael cipher. + * + * @param forEncryption whether or not we are for encryption. + * @param parameters the parameters required to set up the cipher. + * @exception ArgumentException if the parameters argument is + * inappropriate. 
+ */ + + public void Init( + bool forEncryption, + ICipherParameters parameters) + { + var parameter = parameters as KeyParameter; + if (parameter != null) + { + workingKey = GenerateWorkingKey(parameter.GetKey()); + this.forEncryption = forEncryption; + return; + } + + throw new ArgumentException("invalid parameter passed to Rijndael init - " + parameters.GetType()); + } + + public string AlgorithmName => "Rijndael"; + + public bool IsPartialBlockOkay => false; + + public int GetBlockSize() + { + return BC / 2; + } + + public int ProcessBlock( + byte[] input, + int inOff, + byte[] output, + int outOff) + { + if (workingKey == null) + { + throw new InvalidOperationException("Rijndael engine not initialised"); + } + + if ((inOff + (BC / 2)) > input.Length) + { + throw new DataLengthException("input buffer too short"); + } + + if ((outOff + (BC / 2)) > output.Length) + { + throw new DataLengthException("output buffer too short"); + } + + UnPackBlock(input, inOff); + + if (forEncryption) + { + EncryptBlock(workingKey); + } + else + { + DecryptBlock(workingKey); + } + + PackBlock(output, outOff); + + return BC / 2; + } + + public void Reset() + { + } + + private void UnPackBlock( + byte[] bytes, + int off) + { + int index = off; + + A0 = bytes[index++] & 0xff; + A1 = bytes[index++] & 0xff; + A2 = bytes[index++] & 0xff; + A3 = bytes[index++] & 0xff; + + for (int j = 8; j != BC; j += 8) + { + A0 |= (long)(bytes[index++] & 0xff) << j; + A1 |= (long)(bytes[index++] & 0xff) << j; + A2 |= (long)(bytes[index++] & 0xff) << j; + A3 |= (long)(bytes[index++] & 0xff) << j; + } + } + + private void PackBlock( + byte[] bytes, + int off) + { + int index = off; + + for (int j = 0; j != BC; j += 8) + { + bytes[index++] = (byte)(A0 >> j); + bytes[index++] = (byte)(A1 >> j); + bytes[index++] = (byte)(A2 >> j); + bytes[index++] = (byte)(A3 >> j); + } + } + + private void EncryptBlock( + long[][] rk) + { + int r; + + // + // begin with a key addition + // + KeyAddition(rk[0]); + + // + // 
ROUNDS-1 ordinary rounds + // + for (r = 1; r < ROUNDS; r++) + { + Substitution(S); + ShiftRow(shifts0SC); + MixColumn(); + KeyAddition(rk[r]); + } + + // + // Last round is special: there is no MixColumn + // + Substitution(S); + ShiftRow(shifts0SC); + KeyAddition(rk[ROUNDS]); + } + + private void DecryptBlock( + long[][] rk) + { + int r; + + // To decrypt: apply the inverse operations of the encrypt routine, + // in opposite order + // + // (KeyAddition is an involution: it 's equal to its inverse) + // (the inverse of Substitution with table S is Substitution with the inverse table of S) + // (the inverse of Shiftrow is Shiftrow over a suitable distance) + // + + // First the special round: + // without InvMixColumn + // with extra KeyAddition + // + KeyAddition(rk[ROUNDS]); + Substitution(Si); + ShiftRow(shifts1SC); + + // + // ROUNDS-1 ordinary rounds + // + for (r = ROUNDS - 1; r > 0; r--) + { + KeyAddition(rk[r]); + InvMixColumn(); + Substitution(Si); + ShiftRow(shifts1SC); + } + + // + // End with the extra key addition + // + KeyAddition(rk[0]); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/EnumExtensions.cs b/BizHawk.Client.Common/SharpCompress/EnumExtensions.cs new file mode 100644 index 0000000000..5f5fe0d6cc --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/EnumExtensions.cs @@ -0,0 +1,18 @@ + +#if NET35 +using System; + +namespace SharpCompress +{ + internal static class EnumExtensions + { + public static bool HasFlag(this Enum enumRef, Enum flag) + { + long value = Convert.ToInt64(enumRef); + long flagVal = Convert.ToInt64(flag); + + return (value & flagVal) == flagVal; + } + } +} +#endif \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/BufferedSubStream.cs b/BizHawk.Client.Common/SharpCompress/IO/BufferedSubStream.cs new file mode 100644 index 0000000000..9b6ae6cd3a --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/IO/BufferedSubStream.cs @@ -0,0 +1,83 @@ +using 
System; +using System.IO; + +namespace SharpCompress.IO +{ + internal class BufferedSubStream : NonDisposingStream + { + private long position; + private int cacheOffset; + private int cacheLength; + private readonly byte[] cache; + + public BufferedSubStream(Stream stream, long origin, long bytesToRead) : base(stream, throwOnDispose: false) + { + position = origin; + BytesLeftToRead = bytesToRead; + cache = new byte[32 << 10]; + } + + private long BytesLeftToRead { get; set; } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => BytesLeftToRead; + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + if (count > BytesLeftToRead) + { + count = (int)BytesLeftToRead; + } + + if (count > 0) + { + if (cacheLength == 0) + { + cacheOffset = 0; + Stream.Position = position; + cacheLength = Stream.Read(cache, 0, cache.Length); + position += cacheLength; + } + + if (count > cacheLength) + { + count = cacheLength; + } + + Buffer.BlockCopy(cache, cacheOffset, buffer, offset, count); + cacheOffset += count; + cacheLength -= count; + BytesLeftToRead -= count; + } + + return count; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/CountingWritableSubStream.cs b/BizHawk.Client.Common/SharpCompress/IO/CountingWritableSubStream.cs new file mode 100644 index 0000000000..b94303bf55 --- /dev/null +++ 
b/BizHawk.Client.Common/SharpCompress/IO/CountingWritableSubStream.cs @@ -0,0 +1,56 @@ +using System; +using System.IO; + +namespace SharpCompress.IO +{ + internal class CountingWritableSubStream : NonDisposingStream + { + internal CountingWritableSubStream(Stream stream) : base(stream, throwOnDispose: false) + { + } + + public ulong Count { get; private set; } + + public override bool CanRead => false; + + public override bool CanSeek => false; + + public override bool CanWrite => true; + + public override void Flush() + { + Stream.Flush(); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + Stream.Write(buffer, offset, count); + Count += (uint)count; + } + + public override void WriteByte(byte value) + { + Stream.WriteByte(value); + ++Count; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/ListeningStream.cs b/BizHawk.Client.Common/SharpCompress/IO/ListeningStream.cs new file mode 100644 index 0000000000..1bab99d2e1 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/IO/ListeningStream.cs @@ -0,0 +1,79 @@ +using System.IO; +using SharpCompress.Common; + +namespace SharpCompress.IO +{ + internal class ListeningStream : Stream + { + private long currentEntryTotalReadBytes; + private readonly IExtractionListener listener; + + public ListeningStream(IExtractionListener listener, Stream stream) + { + Stream = stream; + this.listener = listener; + } + + protected override void Dispose(bool disposing) + { 
+ if (disposing) + { + Stream.Dispose(); + } + base.Dispose(disposing); + } + + public Stream Stream { get; } + + public override bool CanRead => Stream.CanRead; + + public override bool CanSeek => Stream.CanSeek; + + public override bool CanWrite => Stream.CanWrite; + + public override void Flush() + { + Stream.Flush(); + } + + public override long Length => Stream.Length; + + public override long Position { get => Stream.Position; set => Stream.Position = value; } + + public override int Read(byte[] buffer, int offset, int count) + { + int read = Stream.Read(buffer, offset, count); + currentEntryTotalReadBytes += read; + listener.FireCompressedBytesRead(currentEntryTotalReadBytes, currentEntryTotalReadBytes); + return read; + } + + public override int ReadByte() + { + int value = Stream.ReadByte(); + if (value == -1) + { + return -1; + } + + ++currentEntryTotalReadBytes; + listener.FireCompressedBytesRead(currentEntryTotalReadBytes, currentEntryTotalReadBytes); + return value; + } + + public override long Seek(long offset, SeekOrigin origin) + { + return Stream.Seek(offset, origin); + } + + public override void SetLength(long value) + { + Stream.SetLength(value); + } + + public override void Write(byte[] buffer, int offset, int count) + { + Stream.Write(buffer, offset, count); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/MarkingBinaryReader.cs b/BizHawk.Client.Common/SharpCompress/IO/MarkingBinaryReader.cs new file mode 100644 index 0000000000..e103799532 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/IO/MarkingBinaryReader.cs @@ -0,0 +1,206 @@ +using System; +using System.IO; +using SharpCompress.Converters; + +namespace SharpCompress.IO +{ + internal class MarkingBinaryReader : BinaryReader + { + public MarkingBinaryReader(Stream stream) + : base(stream) + { + } + + public virtual long CurrentReadByteCount { get; protected set; } + + public virtual void Mark() + { + CurrentReadByteCount = 0; + } + + public 
override int Read() + { + throw new NotSupportedException(); + } + + public override int Read(byte[] buffer, int index, int count) + { + throw new NotSupportedException(); + } + + public override int Read(char[] buffer, int index, int count) + { + throw new NotSupportedException(); + } + + public override bool ReadBoolean() + { + return ReadByte() != 0; + } + + // NOTE: there is a somewhat fragile dependency on the internals of this class + // with RarCrcBinaryReader and RarCryptoBinaryReader. + // + // RarCrcBinaryReader/RarCryptoBinaryReader need to override any specific methods + // that call directly to the base BinaryReader and do not delegate to other methods + // in this class so that it can track the each byte being read. + // + // if altering this class in a way that changes the implementation be sure to + // update RarCrcBinaryReader/RarCryptoBinaryReader. + public override byte ReadByte() + { + CurrentReadByteCount++; + return base.ReadByte(); + } + + public override byte[] ReadBytes(int count) + { + CurrentReadByteCount += count; + var bytes = base.ReadBytes(count); + if (bytes.Length != count) + { + throw new EndOfStreamException(string.Format("Could not read the requested amount of bytes. End of stream reached. 
Requested: {0} Read: {1}", count, bytes.Length)); + } + return bytes; + } + + public override char ReadChar() + { + throw new NotSupportedException(); + } + + public override char[] ReadChars(int count) + { + throw new NotSupportedException(); + } + +#if !SILVERLIGHT + public override decimal ReadDecimal() + { + throw new NotSupportedException(); + } +#endif + + public override double ReadDouble() + { + throw new NotSupportedException(); + } + + public override short ReadInt16() + { + return DataConverter.LittleEndian.GetInt16(ReadBytes(2), 0); + } + + public override int ReadInt32() + { + return DataConverter.LittleEndian.GetInt32(ReadBytes(4), 0); + } + + public override long ReadInt64() + { + return DataConverter.LittleEndian.GetInt64(ReadBytes(8), 0); + } + + public override sbyte ReadSByte() + { + return (sbyte)ReadByte(); + } + + public override float ReadSingle() + { + throw new NotSupportedException(); + } + + public override string ReadString() + { + throw new NotSupportedException(); + } + + public override ushort ReadUInt16() + { + return DataConverter.LittleEndian.GetUInt16(ReadBytes(2), 0); + } + + public override uint ReadUInt32() + { + return DataConverter.LittleEndian.GetUInt32(ReadBytes(4), 0); + } + + public override ulong ReadUInt64() + { + return DataConverter.LittleEndian.GetUInt64(ReadBytes(8), 0); + } + + // RAR5 style variable length encoded value + // maximum value of 0xffffffffffffffff (64 bits) + // technote: "implies max 10 bytes consumed" -- but not really because we could extend indefinitely using 0x80 0x80 ... 0x80 00 + // + // Variable length integer. Can include one or more bytes, where lower 7 bits of every byte contain integer data + // and highest bit in every byte is the continuation flag. If highest bit is 0, this is the last byte in sequence. + // So first byte contains 7 least significant bits of integer and continuation flag. Second byte, if present, + // contains next 7 bits and so on. 
+ public ulong ReadRarVInt(int maxBytes = 10) { + // hopefully this gets inlined + return DoReadRarVInt((maxBytes - 1) * 7); + } + + private ulong DoReadRarVInt(int maxShift) { + int shift = 0; + ulong result = 0; + do { + byte b0 = ReadByte(); + uint b1 = ((uint)b0) & 0x7f; + ulong n = b1; + ulong shifted = n << shift; + if (n != shifted >> shift) { + // overflow + break; + } + result |= shifted; + if (b0 == b1) { + return result; + } + shift += 7; + } while (shift <= maxShift); + + throw new FormatException("malformed vint"); + } + + public uint ReadRarVIntUInt32(int maxBytes = 5) { + // hopefully this gets inlined + return DoReadRarVIntUInt32((maxBytes - 1) * 7); + } + + public ushort ReadRarVIntUInt16(int maxBytes = 3) { + // hopefully this gets inlined + return checked((ushort)DoReadRarVIntUInt32((maxBytes - 1) * 7)); + } + + public byte ReadRarVIntByte(int maxBytes = 2) { + // hopefully this gets inlined + return checked((byte)DoReadRarVIntUInt32((maxBytes - 1) * 7)); + } + + private uint DoReadRarVIntUInt32(int maxShift) { + int shift = 0; + uint result = 0; + do { + byte b0 = ReadByte(); + uint b1 = ((uint)b0) & 0x7f; + uint n = b1; + uint shifted = n << shift; + if (n != shifted >> shift) { + // overflow + break; + } + result |= shifted; + if (b0 == b1) { + return result; + } + shift += 7; + } while (shift <= maxShift); + + throw new FormatException("malformed vint"); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/NonDisposingStream.cs b/BizHawk.Client.Common/SharpCompress/IO/NonDisposingStream.cs new file mode 100644 index 0000000000..9c7c4059d5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/IO/NonDisposingStream.cs @@ -0,0 +1,61 @@ +using System; +using System.IO; + +namespace SharpCompress.IO +{ + public class NonDisposingStream : Stream + { + public NonDisposingStream(Stream stream, bool throwOnDispose = false) + { + Stream = stream; + ThrowOnDispose = throwOnDispose; + } + + public bool 
ThrowOnDispose { get; set; } + + protected override void Dispose(bool disposing) + { + if (ThrowOnDispose) + { + throw new InvalidOperationException($"Attempt to dispose of a {nameof(NonDisposingStream)} when {nameof(ThrowOnDispose)} is {ThrowOnDispose}"); + } + } + + protected Stream Stream { get; } + + public override bool CanRead => Stream.CanRead; + + public override bool CanSeek => Stream.CanSeek; + + public override bool CanWrite => Stream.CanWrite; + + public override void Flush() + { + Stream.Flush(); + } + + public override long Length => Stream.Length; + + public override long Position { get => Stream.Position; set => Stream.Position = value; } + + public override int Read(byte[] buffer, int offset, int count) + { + return Stream.Read(buffer, offset, count); + } + + public override long Seek(long offset, SeekOrigin origin) + { + return Stream.Seek(offset, origin); + } + + public override void SetLength(long value) + { + Stream.SetLength(value); + } + + public override void Write(byte[] buffer, int offset, int count) + { + Stream.Write(buffer, offset, count); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/ReadOnlySubStream.cs b/BizHawk.Client.Common/SharpCompress/IO/ReadOnlySubStream.cs new file mode 100644 index 0000000000..e05921ca08 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/IO/ReadOnlySubStream.cs @@ -0,0 +1,83 @@ +using System; +using System.IO; + +namespace SharpCompress.IO +{ + internal class ReadOnlySubStream : NonDisposingStream + { + public ReadOnlySubStream(Stream stream, long bytesToRead) + : this(stream, null, bytesToRead) + { + } + + public ReadOnlySubStream(Stream stream, long? 
origin, long bytesToRead) + : base(stream, throwOnDispose: false) + { + if (origin != null) + { + stream.Position = origin.Value; + } + BytesLeftToRead = bytesToRead; + } + + private long BytesLeftToRead { get; set; } + + public override bool CanRead => true; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); } + + public override int Read(byte[] buffer, int offset, int count) + { + if (BytesLeftToRead < count) + { + count = (int)BytesLeftToRead; + } + int read = Stream.Read(buffer, offset, count); + if (read > 0) + { + BytesLeftToRead -= read; + } + return read; + } + + public override int ReadByte() + { + if (BytesLeftToRead <= 0) + { + return -1; + } + int value = Stream.ReadByte(); + if (value != -1) + { + --BytesLeftToRead; + } + return value; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/RewindableStream.cs b/BizHawk.Client.Common/SharpCompress/IO/RewindableStream.cs new file mode 100644 index 0000000000..3cfc035cbc --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/IO/RewindableStream.cs @@ -0,0 +1,164 @@ +using System; +using System.IO; +using SharpCompress.Compressors.Filters; + +namespace SharpCompress.IO +{ + internal class RewindableStream : Stream + { + private readonly Stream stream; + private MemoryStream bufferStream = new MemoryStream(); + private bool isRewound; + private bool isDisposed; + + 
public RewindableStream(Stream stream) + { + this.stream = stream; + } + + internal bool IsRecording { get; private set; } + + protected override void Dispose(bool disposing) + { + if (isDisposed) + { + return; + } + isDisposed = true; + base.Dispose(disposing); + if (disposing) + { + stream.Dispose(); + } + } + + public void Rewind(bool stopRecording) + { + isRewound = true; + IsRecording = !stopRecording; + bufferStream.Position = 0; + } + + public void Rewind(MemoryStream buffer) + { + if (bufferStream.Position >= buffer.Length) + { + bufferStream.Position -= buffer.Length; + } + else + { + + bufferStream.TransferTo(buffer); + //create new memorystream to allow proper resizing as memorystream could be a user provided buffer + //https://github.com/adamhathcock/sharpcompress/issues/306 + bufferStream = new MemoryStream(); + buffer.Position = 0; + buffer.TransferTo(bufferStream); + bufferStream.Position = 0; + } + isRewound = true; + } + + public void StartRecording() + { + //if (isRewound && bufferStream.Position != 0) + // throw new System.NotImplementedException(); + if (bufferStream.Position != 0) + { + byte[] data = bufferStream.ToArray(); + long position = bufferStream.Position; + bufferStream.SetLength(0); + bufferStream.Write(data, (int)position, data.Length - (int)position); + bufferStream.Position = 0; + } + IsRecording = true; + } + + public override bool CanRead => true; + + public override bool CanSeek => stream.CanSeek; + + public override bool CanWrite => false; + + public override void Flush() + { + throw new NotSupportedException(); + } + + public override long Length => throw new NotSupportedException(); + + public override long Position + { + get { return stream.Position + bufferStream.Position - bufferStream.Length; } + set + { + if (!isRewound) + { + stream.Position = value; + } + else if (value < stream.Position - bufferStream.Length || value >= stream.Position) + { + stream.Position = value; + isRewound = false; + bufferStream.SetLength(0); + 
} + else + { + bufferStream.Position = value - stream.Position + bufferStream.Length; + } + } + } + + public override int Read(byte[] buffer, int offset, int count) + { + //don't actually read if we don't really want to read anything + //currently a network stream bug on Windows for .NET Core + if (count == 0) + { + return 0; + } + int read; + if (isRewound && bufferStream.Position != bufferStream.Length) + { + read = bufferStream.Read(buffer, offset, count); + if (read < count) + { + int tempRead = stream.Read(buffer, offset + read, count - read); + if (IsRecording) + { + bufferStream.Write(buffer, offset + read, tempRead); + } + read += tempRead; + } + if (bufferStream.Position == bufferStream.Length && !IsRecording) + { + isRewound = false; + bufferStream.SetLength(0); + } + return read; + } + + read = stream.Read(buffer, offset, count); + if (IsRecording) + { + bufferStream.Write(buffer, offset, read); + } + return read; + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/IO/StreamingMode.cs b/BizHawk.Client.Common/SharpCompress/IO/StreamingMode.cs new file mode 100644 index 0000000000..29d3a147f9 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/IO/StreamingMode.cs @@ -0,0 +1,8 @@ +namespace SharpCompress.IO +{ + internal enum StreamingMode + { + Streaming, + Seekable + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Lazy.cs b/BizHawk.Client.Common/SharpCompress/Lazy.cs new file mode 100644 index 0000000000..893e3bb1f2 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Lazy.cs @@ -0,0 +1,29 @@ +using System; + +namespace SharpCompress +{ + public class Lazy + { + private 
readonly Func _lazyFunc; + private bool _evaluated; + private T _value; + + public Lazy(Func lazyFunc) + { + _lazyFunc = lazyFunc; + } + + public T Value + { + get + { + if (!_evaluated) + { + _value = _lazyFunc(); + _evaluated = true; + } + return _value; + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/LazyReadOnlyCollection.cs b/BizHawk.Client.Common/SharpCompress/LazyReadOnlyCollection.cs new file mode 100644 index 0000000000..ecec3aea9c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/LazyReadOnlyCollection.cs @@ -0,0 +1,151 @@ +using System; +using System.Collections; +using System.Collections.Generic; + +namespace SharpCompress +{ + internal class LazyReadOnlyCollection : ICollection + { + private readonly List backing = new List(); + private readonly IEnumerator source; + private bool fullyLoaded; + + public LazyReadOnlyCollection(IEnumerable source) + { + this.source = source.GetEnumerator(); + } + + private class LazyLoader : IEnumerator + { + private readonly LazyReadOnlyCollection lazyReadOnlyCollection; + private bool disposed; + private int index = -1; + + internal LazyLoader(LazyReadOnlyCollection lazyReadOnlyCollection) + { + this.lazyReadOnlyCollection = lazyReadOnlyCollection; + } + + #region IEnumerator Members + + public T Current => lazyReadOnlyCollection.backing[index]; + + #endregion + + #region IDisposable Members + + public void Dispose() + { + if (!disposed) + { + disposed = true; + } + } + + #endregion + + #region IEnumerator Members + + object IEnumerator.Current => Current; + + public bool MoveNext() + { + if (index + 1 < lazyReadOnlyCollection.backing.Count) + { + index++; + return true; + } + if (!lazyReadOnlyCollection.fullyLoaded && lazyReadOnlyCollection.source.MoveNext()) + { + lazyReadOnlyCollection.backing.Add(lazyReadOnlyCollection.source.Current); + index++; + return true; + } + lazyReadOnlyCollection.fullyLoaded = true; + return false; + } + + public void Reset() + { + throw 
new NotSupportedException(); + } + + #endregion + } + + internal void EnsureFullyLoaded() + { + if (!fullyLoaded) + { + this.ForEach(x => { }); + fullyLoaded = true; + } + } + + internal IEnumerable GetLoaded() + { + return backing; + } + + #region ICollection Members + + public void Add(T item) + { + throw new NotSupportedException(); + } + + public void Clear() + { + throw new NotSupportedException(); + } + + public bool Contains(T item) + { + EnsureFullyLoaded(); + return backing.Contains(item); + } + + public void CopyTo(T[] array, int arrayIndex) + { + EnsureFullyLoaded(); + backing.CopyTo(array, arrayIndex); + } + + public int Count + { + get + { + EnsureFullyLoaded(); + return backing.Count; + } + } + + public bool IsReadOnly => true; + + public bool Remove(T item) + { + throw new NotSupportedException(); + } + + #endregion + + #region IEnumerable Members + + //TODO check for concurrent access + public IEnumerator GetEnumerator() + { + return new LazyLoader(this); + } + + #endregion + + #region IEnumerable Members + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + #endregion + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/ReadOnlyCollection.cs b/BizHawk.Client.Common/SharpCompress/ReadOnlyCollection.cs new file mode 100644 index 0000000000..32c2abc8b4 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/ReadOnlyCollection.cs @@ -0,0 +1,55 @@ +using System; +using System.Collections; +using System.Collections.Generic; + +namespace SharpCompress +{ + internal class ReadOnlyCollection : ICollection + { + private readonly ICollection collection; + + public ReadOnlyCollection(ICollection collection) + { + this.collection = collection; + } + + public void Add(T item) + { + throw new NotSupportedException(); + } + + public void Clear() + { + throw new NotSupportedException(); + } + + public bool Contains(T item) + { + return collection.Contains(item); + } + + public void CopyTo(T[] array, int 
arrayIndex) + { + collection.CopyTo(array, arrayIndex); + } + + public int Count => collection.Count; + + public bool IsReadOnly => true; + + public bool Remove(T item) + { + throw new NotSupportedException(); + } + + public IEnumerator GetEnumerator() + { + return collection.GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/AbstractReader.cs b/BizHawk.Client.Common/SharpCompress/Readers/AbstractReader.cs new file mode 100644 index 0000000000..b09497f1a0 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/AbstractReader.cs @@ -0,0 +1,232 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; + +namespace SharpCompress.Readers +{ + /// + /// A generic push reader that reads unseekable comrpessed streams. + /// + public abstract class AbstractReader : IReader, IReaderExtractionListener + where TEntry : Entry + where TVolume : Volume + { + private bool completed; + private IEnumerator entriesForCurrentReadStream; + private bool wroteCurrentEntry; + + public event EventHandler> EntryExtractionProgress; + + public event EventHandler CompressedBytesRead; + public event EventHandler FilePartExtractionBegin; + + internal AbstractReader(ReaderOptions options, ArchiveType archiveType) + { + ArchiveType = archiveType; + Options = options; + } + + internal ReaderOptions Options { get; } + + public ArchiveType ArchiveType { get; } + + /// + /// Current volume that the current entry resides in + /// + public abstract TVolume Volume { get; } + + /// + /// Current file entry + /// + public TEntry Entry => entriesForCurrentReadStream.Current; + + #region IDisposable Members + + public void Dispose() + { + entriesForCurrentReadStream?.Dispose(); + Volume?.Dispose(); + } + + #endregion + + public bool Cancelled { get; private set; } + + /// + /// Indicates 
that the remaining entries are not required. + /// On dispose of an EntryStream, the stream will not skip to the end of the entry. + /// An attempt to move to the next entry will throw an exception, as the compressed stream is not positioned at an entry boundary. + /// + public void Cancel() + { + if (!completed) + { + Cancelled = true; + } + } + + public bool MoveToNextEntry() + { + if (completed) + { + return false; + } + if (Cancelled) + { + throw new InvalidOperationException("Reader has been cancelled."); + } + if (entriesForCurrentReadStream == null) + { + return LoadStreamForReading(RequestInitialStream()); + } + if (!wroteCurrentEntry) + { + SkipEntry(); + } + wroteCurrentEntry = false; + if (NextEntryForCurrentStream()) + { + return true; + } + completed = true; + return false; + } + + protected bool LoadStreamForReading(Stream stream) + { + entriesForCurrentReadStream?.Dispose(); + if ((stream == null) || (!stream.CanRead)) + { + throw new MultipartStreamRequiredException("File is split into multiple archives: '" + + Entry.Key + + "'. A new readable stream is required. Use Cancel if it was intended."); + } + entriesForCurrentReadStream = GetEntries(stream).GetEnumerator(); + return entriesForCurrentReadStream.MoveNext(); + } + + protected virtual Stream RequestInitialStream() + { + return Volume.Stream; + } + + internal virtual bool NextEntryForCurrentStream() + { + return entriesForCurrentReadStream.MoveNext(); + } + + protected abstract IEnumerable GetEntries(Stream stream); + + #region Entry Skip/Write + + private void SkipEntry() + { + if (!Entry.IsDirectory) + { + Skip(); + } + } + + private void Skip() + { + if (ArchiveType != ArchiveType.Rar + && !Entry.IsSolid + && Entry.CompressedSize > 0) + { + //not solid and has a known compressed size then we can skip raw bytes. 
+ var part = Entry.Parts.First(); + var rawStream = part.GetRawStream(); + + if (rawStream != null) + { + var bytesToAdvance = Entry.CompressedSize; + rawStream.Skip(bytesToAdvance); + part.Skipped = true; + return; + } + } + //don't know the size so we have to try to decompress to skip + using (var s = OpenEntryStream()) + { + s.Skip(); + } + } + + public void WriteEntryTo(Stream writableStream) + { + if (wroteCurrentEntry) + { + throw new ArgumentException("WriteEntryTo or OpenEntryStream can only be called once."); + } + if ((writableStream == null) || (!writableStream.CanWrite)) + { + throw new ArgumentNullException("A writable Stream was required. Use Cancel if that was intended."); + } + + Write(writableStream); + wroteCurrentEntry = true; + } + + internal void Write(Stream writeStream) + { + var streamListener = this as IReaderExtractionListener; + using (Stream s = OpenEntryStream()) + { + s.TransferTo(writeStream, Entry, streamListener); + } + } + + public EntryStream OpenEntryStream() + { + if (wroteCurrentEntry) + { + throw new ArgumentException("WriteEntryTo or OpenEntryStream can only be called once."); + } + var stream = GetEntryStream(); + wroteCurrentEntry = true; + return stream; + } + + /// + /// Retains a reference to the entry stream, so we can check whether it completed later. 
+ /// + protected EntryStream CreateEntryStream(Stream decompressed) + { + return new EntryStream(this, decompressed); + } + + protected virtual EntryStream GetEntryStream() + { + return CreateEntryStream(Entry.Parts.First().GetCompressedStream()); + } + + #endregion + + IEntry IReader.Entry => Entry; + + void IExtractionListener.FireCompressedBytesRead(long currentPartCompressedBytes, long compressedReadBytes) + { + CompressedBytesRead?.Invoke(this, new CompressedBytesReadEventArgs + { + CurrentFilePartCompressedBytesRead = currentPartCompressedBytes, + CompressedBytesRead = compressedReadBytes + }); + } + + void IExtractionListener.FireFilePartExtractionBegin(string name, long size, long compressedSize) + { + FilePartExtractionBegin?.Invoke(this, new FilePartExtractionBeginEventArgs + { + CompressedSize = compressedSize, + Size = size, + Name = name + }); + } + void IReaderExtractionListener.FireEntryExtractionProgress(Entry entry, long bytesTransferred, int iterations) + { + EntryExtractionProgress?.Invoke(this, new ReaderExtractionEventArgs(entry, new ReaderProgress(entry, bytesTransferred, iterations))); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/GZip/GZipReader.cs b/BizHawk.Client.Common/SharpCompress/Readers/GZip/GZipReader.cs new file mode 100644 index 0000000000..ecee69ffe5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/GZip/GZipReader.cs @@ -0,0 +1,39 @@ +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common; +using SharpCompress.Common.GZip; + +namespace SharpCompress.Readers.GZip +{ + public class GZipReader : AbstractReader + { + internal GZipReader(Stream stream, ReaderOptions options) + : base(options, ArchiveType.GZip) + { + Volume = new GZipVolume(stream, options); + } + + public override GZipVolume Volume { get; } + + #region Open + + /// + /// Opens a GZipReader for Non-seeking usage with a single volume + /// + /// + /// + /// + public static 
GZipReader Open(Stream stream, ReaderOptions options = null) + { + stream.CheckNotNull("stream"); + return new GZipReader(stream, options ?? new ReaderOptions()); + } + + #endregion Open + + protected override IEnumerable GetEntries(Stream stream) + { + return GZipEntry.GetEntries(stream, Options); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/IReader.cs b/BizHawk.Client.Common/SharpCompress/Readers/IReader.cs new file mode 100644 index 0000000000..db11164dee --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/IReader.cs @@ -0,0 +1,39 @@ +using System; +using System.IO; +using SharpCompress.Common; + +namespace SharpCompress.Readers +{ + public interface IReader : IDisposable + { + event EventHandler> EntryExtractionProgress; + + event EventHandler CompressedBytesRead; + event EventHandler FilePartExtractionBegin; + + ArchiveType ArchiveType { get; } + + IEntry Entry { get; } + + /// + /// Decompresses the current entry to the stream. This cannot be called twice for the current entry. + /// + /// + void WriteEntryTo(Stream writableStream); + + bool Cancelled { get; } + void Cancel(); + + /// + /// Moves to the next entry by reading more data from the underlying stream. This skips if data has not been read. + /// + /// + bool MoveToNextEntry(); + + /// + /// Opens the current entry as a stream that will decompress as it is read. + /// Read the entire stream or use SkipEntry on EntryStream. 
+ /// + EntryStream OpenEntryStream(); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/IReaderExtensions.cs b/BizHawk.Client.Common/SharpCompress/Readers/IReaderExtensions.cs new file mode 100644 index 0000000000..d63cc96760 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/IReaderExtensions.cs @@ -0,0 +1,66 @@ +#if !NO_FILE +using System.IO; +using SharpCompress.Common; +#endif + +namespace SharpCompress.Readers +{ + public static class IReaderExtensions + { +#if !NO_FILE + public static void WriteEntryTo(this IReader reader, string filePath) + { + using (Stream stream = File.Open(filePath, FileMode.Create, FileAccess.Write)) + { + reader.WriteEntryTo(stream); + } + } + + public static void WriteEntryTo(this IReader reader, FileInfo filePath) + { + using (Stream stream = filePath.Open(FileMode.Create)) + { + reader.WriteEntryTo(stream); + } + } + + /// + /// Extract all remaining unread entries to specific directory, retaining filename + /// + public static void WriteAllToDirectory(this IReader reader, string destinationDirectory, + ExtractionOptions options = null) + { + while (reader.MoveToNextEntry()) + { + reader.WriteEntryToDirectory(destinationDirectory, options); + } + } + + /// + /// Extract to specific directory, retaining filename + /// + public static void WriteEntryToDirectory(this IReader reader, string destinationDirectory, + ExtractionOptions options = null) + { + ExtractionMethods.WriteEntryToDirectory(reader.Entry, destinationDirectory, options, + reader.WriteEntryToFile); + } + + /// + /// Extract to specific file + /// + public static void WriteEntryToFile(this IReader reader, string destinationFileName, + ExtractionOptions options = null) + { + ExtractionMethods.WriteEntryToFile(reader.Entry, destinationFileName, options, + (x, fm) => + { + using (FileStream fs = File.Open(destinationFileName, fm)) + { + reader.WriteEntryTo(fs); + } + }); + } +#endif + } +} \ No newline at end of file 
diff --git a/BizHawk.Client.Common/SharpCompress/Readers/IReaderExtractionListener.cs b/BizHawk.Client.Common/SharpCompress/Readers/IReaderExtractionListener.cs new file mode 100644 index 0000000000..4a4adc4eb5 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/IReaderExtractionListener.cs @@ -0,0 +1,9 @@ +using SharpCompress.Common; + +namespace SharpCompress.Readers +{ + internal interface IReaderExtractionListener : IExtractionListener + { + void FireEntryExtractionProgress(Entry entry, long sizeTransferred, int iterations); + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/Rar/MultiVolumeRarReader.cs b/BizHawk.Client.Common/SharpCompress/Readers/Rar/MultiVolumeRarReader.cs new file mode 100644 index 0000000000..db30985ad9 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/Rar/MultiVolumeRarReader.cs @@ -0,0 +1,119 @@ +using System.Collections; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.Rar; + +namespace SharpCompress.Readers.Rar +{ + internal class MultiVolumeRarReader : RarReader + { + private readonly IEnumerator streams; + private Stream tempStream; + + internal MultiVolumeRarReader(IEnumerable streams, ReaderOptions options) + : base(options) + { + this.streams = streams.GetEnumerator(); + } + + internal override void ValidateArchive(RarVolume archive) + { + } + + protected override Stream RequestInitialStream() + { + if (streams.MoveNext()) + { + return streams.Current; + } + throw new MultiVolumeExtractionException("No stream provided when requested by MultiVolumeRarReader"); + } + + internal override bool NextEntryForCurrentStream() + { + if (!base.NextEntryForCurrentStream()) { + // if we're got another stream to try to process then do so + return streams.MoveNext() && LoadStreamForReading(streams.Current); + } + return true; + } + + protected override IEnumerable 
CreateFilePartEnumerableForCurrentEntry() + { + var enumerator = new MultiVolumeStreamEnumerator(this, streams, tempStream); + tempStream = null; + return enumerator; + } + + private class MultiVolumeStreamEnumerator : IEnumerable, IEnumerator + { + private readonly MultiVolumeRarReader reader; + private readonly IEnumerator nextReadableStreams; + private Stream tempStream; + private bool isFirst = true; + + internal MultiVolumeStreamEnumerator(MultiVolumeRarReader r, IEnumerator nextReadableStreams, + Stream tempStream) + { + reader = r; + this.nextReadableStreams = nextReadableStreams; + this.tempStream = tempStream; + } + + public IEnumerator GetEnumerator() + { + return this; + } + + IEnumerator IEnumerable.GetEnumerator() + { + return this; + } + + public FilePart Current { get; private set; } + + public void Dispose() + { + } + + object IEnumerator.Current => Current; + + public bool MoveNext() + { + if (isFirst) + { + Current = reader.Entry.Parts.First(); + isFirst = false; //first stream already to go + return true; + } + + if (!reader.Entry.IsSplitAfter) + { + return false; + } + if (tempStream != null) + { + reader.LoadStreamForReading(tempStream); + tempStream = null; + } + else if (!nextReadableStreams.MoveNext()) + { + throw new MultiVolumeExtractionException("No stream provided when requested by MultiVolumeRarReader"); + } + else + { + reader.LoadStreamForReading(nextReadableStreams.Current); + } + + Current = reader.Entry.Parts.First(); + return true; + } + + public void Reset() + { + } + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/Rar/NonSeekableStreamFilePart.cs b/BizHawk.Client.Common/SharpCompress/Readers/Rar/NonSeekableStreamFilePart.cs new file mode 100644 index 0000000000..c5f62a4426 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/Rar/NonSeekableStreamFilePart.cs @@ -0,0 +1,21 @@ +using System.IO; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; + 
+namespace SharpCompress.Readers.Rar +{ + internal class NonSeekableStreamFilePart : RarFilePart + { + internal NonSeekableStreamFilePart(MarkHeader mh, FileHeader fh) + : base(mh, fh) + { + } + + internal override Stream GetCompressedStream() + { + return FileHeader.PackedStream; + } + + internal override string FilePartName => "Unknown Stream - File Entry: " + FileHeader.FileName; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReader.cs b/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReader.cs new file mode 100644 index 0000000000..a40dca44a7 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReader.cs @@ -0,0 +1,77 @@ +using System.Collections.Generic; +using System.IO; +using System.Linq; +using SharpCompress.Common; +using SharpCompress.Common.Rar; +using SharpCompress.Compressors.Rar; + +namespace SharpCompress.Readers.Rar +{ + /// + /// This class faciliates Reading a Rar Archive in a non-seekable forward-only manner + /// + public abstract class RarReader : AbstractReader + { + private RarVolume volume; + internal Lazy UnpackV2017 { get; } = new Lazy(() => new SharpCompress.Compressors.Rar.UnpackV2017.Unpack()); + internal Lazy UnpackV1 { get; } = new Lazy(() => new SharpCompress.Compressors.Rar.UnpackV1.Unpack()); + + internal RarReader(ReaderOptions options) + : base(options, ArchiveType.Rar) + { + } + + internal abstract void ValidateArchive(RarVolume archive); + + public override RarVolume Volume => volume; + + /// + /// Opens a RarReader for Non-seeking usage with a single volume + /// + /// + /// + /// + public static RarReader Open(Stream stream, ReaderOptions options = null) + { + stream.CheckNotNull("stream"); + return new SingleVolumeRarReader(stream, options ?? 
new ReaderOptions()); + } + + /// + /// Opens a RarReader for Non-seeking usage with multiple volumes + /// + /// + /// + /// + public static RarReader Open(IEnumerable streams, ReaderOptions options = null) + { + streams.CheckNotNull("streams"); + return new MultiVolumeRarReader(streams, options ?? new ReaderOptions()); + } + + protected override IEnumerable GetEntries(Stream stream) + { + volume = new RarReaderVolume(stream, Options); + foreach (RarFilePart fp in volume.ReadFileParts()) + { + ValidateArchive(volume); + yield return new RarReaderEntry(volume.IsSolidArchive, fp); + } + } + + protected virtual IEnumerable CreateFilePartEnumerableForCurrentEntry() + { + return Entry.Parts; + } + + protected override EntryStream GetEntryStream() + { + var stream = new MultiVolumeReadOnlyStream(CreateFilePartEnumerableForCurrentEntry().Cast(), this); + if (Entry.IsRarV3) + { + return CreateEntryStream(new RarCrcStream(UnpackV1.Value, Entry.FileHeader, stream)); + } + return CreateEntryStream(new RarCrcStream(UnpackV2017.Value, Entry.FileHeader, stream)); + } + } +} diff --git a/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderEntry.cs b/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderEntry.cs new file mode 100644 index 0000000000..2e29d6155c --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderEntry.cs @@ -0,0 +1,34 @@ +using System.Collections.Generic; +using SharpCompress.Common; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; + +namespace SharpCompress.Readers.Rar +{ + public class RarReaderEntry : RarEntry + { + internal RarReaderEntry(bool solid, RarFilePart part) + { + Part = part; + IsSolid = solid; + } + + internal RarFilePart Part { get; } + + internal override IEnumerable Parts => Part.AsEnumerable(); + + internal override FileHeader FileHeader => Part.FileHeader; + + public override CompressionType CompressionType => CompressionType.Rar; + + /// + /// The compressed file size + /// + public 
override long CompressedSize => Part.FileHeader.CompressedSize; + + /// + /// The uncompressed file size + /// + public override long Size => Part.FileHeader.UncompressedSize; + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderVolume.cs b/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderVolume.cs new file mode 100644 index 0000000000..98a8c5c087 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/Rar/RarReaderVolume.cs @@ -0,0 +1,26 @@ +using System.Collections.Generic; +using System.IO; +using SharpCompress.Common.Rar; +using SharpCompress.Common.Rar.Headers; +using SharpCompress.IO; + +namespace SharpCompress.Readers.Rar +{ + public class RarReaderVolume : RarVolume + { + internal RarReaderVolume(Stream stream, ReaderOptions options) + : base(StreamingMode.Streaming, stream, options) + { + } + + internal override RarFilePart CreateFilePart(MarkHeader markHeader, FileHeader fileHeader) + { + return new NonSeekableStreamFilePart(markHeader, fileHeader); + } + + internal override IEnumerable ReadFileParts() + { + return GetVolumeFileParts(); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/Rar/SingleVolumeRarReader.cs b/BizHawk.Client.Common/SharpCompress/Readers/Rar/SingleVolumeRarReader.cs new file mode 100644 index 0000000000..b8d64f8d5a --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/Rar/SingleVolumeRarReader.cs @@ -0,0 +1,30 @@ +using System.IO; +using SharpCompress.Common; +using SharpCompress.Common.Rar; + +namespace SharpCompress.Readers.Rar +{ + internal class SingleVolumeRarReader : RarReader + { + private readonly Stream stream; + + internal SingleVolumeRarReader(Stream stream, ReaderOptions options) + : base(options) + { + this.stream = stream; + } + + internal override void ValidateArchive(RarVolume archive) + { + if (archive.IsMultiVolume) { + var msg = "Streamed archive is a Multi-volume archive. 
Use different RarReader method to extract."; + throw new MultiVolumeExtractionException(msg); + } + } + + protected override Stream RequestInitialStream() + { + return stream; + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/ReaderFactory.cs b/BizHawk.Client.Common/SharpCompress/Readers/ReaderFactory.cs new file mode 100644 index 0000000000..ec7d15c910 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/ReaderFactory.cs @@ -0,0 +1,107 @@ +using System; +using System.IO; +using SharpCompress.Archives.GZip; +using SharpCompress.Archives.Rar; +using SharpCompress.Archives.Tar; +using SharpCompress.Archives.Zip; +using SharpCompress.Common; +using SharpCompress.Compressors; +using SharpCompress.Compressors.BZip2; +using SharpCompress.Compressors.Deflate; +using SharpCompress.IO; +using SharpCompress.Readers.GZip; +using SharpCompress.Readers.Rar; +using SharpCompress.Readers.Tar; +using SharpCompress.Readers.Zip; +using SharpCompress.Compressors.LZMA; +using SharpCompress.Compressors.Xz; + +namespace SharpCompress.Readers +{ + public static class ReaderFactory + { + /// + /// Opens a Reader for Non-seeking usage + /// + /// + /// + /// + public static IReader Open(Stream stream, ReaderOptions options = null) + { + stream.CheckNotNull("stream"); + options = options ?? 
new ReaderOptions() + { + LeaveStreamOpen = false + }; + RewindableStream rewindableStream = new RewindableStream(stream); + rewindableStream.StartRecording(); + if (ZipArchive.IsZipFile(rewindableStream, options.Password)) + { + rewindableStream.Rewind(true); + return ZipReader.Open(rewindableStream, options); + } + rewindableStream.Rewind(false); + if (GZipArchive.IsGZipFile(rewindableStream)) + { + rewindableStream.Rewind(false); + GZipStream testStream = new GZipStream(rewindableStream, CompressionMode.Decompress); + if (TarArchive.IsTarFile(testStream)) + { + rewindableStream.Rewind(true); + return new TarReader(rewindableStream, options, CompressionType.GZip); + } + rewindableStream.Rewind(true); + return GZipReader.Open(rewindableStream, options); + } + + rewindableStream.Rewind(false); + if (BZip2Stream.IsBZip2(rewindableStream)) + { + rewindableStream.Rewind(false); + BZip2Stream testStream = new BZip2Stream(new NonDisposingStream(rewindableStream), CompressionMode.Decompress, false); + if (TarArchive.IsTarFile(testStream)) + { + rewindableStream.Rewind(true); + return new TarReader(rewindableStream, options, CompressionType.BZip2); + } + } + + rewindableStream.Rewind(false); + if (LZipStream.IsLZipFile(rewindableStream)) + { + rewindableStream.Rewind(false); + LZipStream testStream = new LZipStream(new NonDisposingStream(rewindableStream), CompressionMode.Decompress); + if (TarArchive.IsTarFile(testStream)) + { + rewindableStream.Rewind(true); + return new TarReader(rewindableStream, options, CompressionType.LZip); + } + } + rewindableStream.Rewind(false); + if (RarArchive.IsRarFile(rewindableStream, options)) + { + rewindableStream.Rewind(true); + return RarReader.Open(rewindableStream, options); + } + + rewindableStream.Rewind(false); + if (TarArchive.IsTarFile(rewindableStream)) + { + rewindableStream.Rewind(true); + return TarReader.Open(rewindableStream, options); + } + rewindableStream.Rewind(false); + if (XZStream.IsXZStream(rewindableStream)) + { 
+ rewindableStream.Rewind(true); + XZStream testStream = new XZStream(rewindableStream); + if (TarArchive.IsTarFile(testStream)) + { + rewindableStream.Rewind(true); + return new TarReader(rewindableStream, options, CompressionType.Xz); + } + } + throw new InvalidOperationException("Cannot determine compressed stream type. Supported Reader Formats: Zip, GZip, BZip2, Tar, Rar, LZip, XZ"); + } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/ReaderOptions.cs b/BizHawk.Client.Common/SharpCompress/Readers/ReaderOptions.cs new file mode 100644 index 0000000000..683a859891 --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/ReaderOptions.cs @@ -0,0 +1,14 @@ +using SharpCompress.Common; + +namespace SharpCompress.Readers +{ + public class ReaderOptions : OptionsBase + { + /// + /// Look for RarArchive (Check for self-extracting archives or cases where RarArchive isn't at the start of the file) + /// + public bool LookForHeader { get; set; } + + public string Password { get; set; } + } +} \ No newline at end of file diff --git a/BizHawk.Client.Common/SharpCompress/Readers/ReaderProgress.cs b/BizHawk.Client.Common/SharpCompress/Readers/ReaderProgress.cs new file mode 100644 index 0000000000..7b6f099dda --- /dev/null +++ b/BizHawk.Client.Common/SharpCompress/Readers/ReaderProgress.cs @@ -0,0 +1,24 @@ + + +using System; +using SharpCompress.Common; + +namespace SharpCompress.Readers +{ + public class ReaderProgress + { + private readonly IEntry _entry; + public long BytesTransferred { get; } + public int Iterations { get; } + + public int PercentageRead => (int)Math.Round(PercentageReadExact); + public double PercentageReadExact => (float)BytesTransferred / _entry.Size * 100; + + public ReaderProgress(IEntry entry, long bytesTransferred, int iterations) + { + _entry = entry; + BytesTransferred = bytesTransferred; + Iterations = iterations; + } + } +} diff --git 
using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.Archives.GZip;
using SharpCompress.Archives.Tar;
using SharpCompress.Common;
using SharpCompress.Common.Tar;
using SharpCompress.Compressors;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;
using SharpCompress.Compressors.LZMA;
using SharpCompress.Compressors.Xz;

namespace SharpCompress.Readers.Tar
{
    /// <summary>
    /// Forward-only reader for tar archives, including tars wrapped in
    /// gzip, bzip2, lzip or xz compression.
    /// </summary>
    public class TarReader : AbstractReader<TarEntry, TarVolume>
    {
        private readonly CompressionType compressionType;

        internal TarReader(Stream stream, ReaderOptions options, CompressionType compressionType)
            : base(options, ArchiveType.Tar)
        {
            this.compressionType = compressionType;
            Volume = new TarVolume(stream, options);
        }

        public override TarVolume Volume { get; }

        /// <summary>
        /// Wraps the raw volume stream in the decompressor matching the
        /// compression detected (or requested) when the reader was created.
        /// </summary>
        protected override Stream RequestInitialStream()
        {
            Stream raw = base.RequestInitialStream();
            if (compressionType == CompressionType.None)
            {
                return raw;
            }
            if (compressionType == CompressionType.BZip2)
            {
                return new BZip2Stream(raw, CompressionMode.Decompress, false);
            }
            if (compressionType == CompressionType.GZip)
            {
                return new GZipStream(raw, CompressionMode.Decompress);
            }
            if (compressionType == CompressionType.LZip)
            {
                return new LZipStream(raw, CompressionMode.Decompress);
            }
            if (compressionType == CompressionType.Xz)
            {
                return new XZStream(raw);
            }
            throw new NotSupportedException("Invalid compression type: " + compressionType);
        }

        #region Open

        /// <summary>
        /// Opens a TarReader for Non-seeking usage with a single volume.
        /// Sniffs the stream for a gzip/bzip2/lzip wrapper before falling
        /// back to treating it as a plain tar.
        /// </summary>
        public static TarReader Open(Stream stream, ReaderOptions options = null)
        {
            stream.CheckNotNull("stream");
            options = options ?? new ReaderOptions();
            RewindableStream rewindableStream = new RewindableStream(stream);
            rewindableStream.StartRecording();

            if (GZipArchive.IsGZipFile(rewindableStream))
            {
                rewindableStream.Rewind(false);
                GZipStream testStream = new GZipStream(rewindableStream, CompressionMode.Decompress);
                if (!TarArchive.IsTarFile(testStream))
                {
                    throw new InvalidFormatException("Not a tar file.");
                }
                rewindableStream.Rewind(true);
                return new TarReader(rewindableStream, options, CompressionType.GZip);
            }

            rewindableStream.Rewind(false);
            if (BZip2Stream.IsBZip2(rewindableStream))
            {
                rewindableStream.Rewind(false);
                BZip2Stream testStream = new BZip2Stream(rewindableStream, CompressionMode.Decompress, false);
                if (!TarArchive.IsTarFile(testStream))
                {
                    throw new InvalidFormatException("Not a tar file.");
                }
                rewindableStream.Rewind(true);
                return new TarReader(rewindableStream, options, CompressionType.BZip2);
            }

            rewindableStream.Rewind(false);
            if (LZipStream.IsLZipFile(rewindableStream))
            {
                rewindableStream.Rewind(false);
                LZipStream testStream = new LZipStream(rewindableStream, CompressionMode.Decompress);
                if (!TarArchive.IsTarFile(testStream))
                {
                    throw new InvalidFormatException("Not a tar file.");
                }
                rewindableStream.Rewind(true);
                return new TarReader(rewindableStream, options, CompressionType.LZip);
            }

            // No recognised wrapper: assume an uncompressed tar.
            rewindableStream.Rewind(true);
            return new TarReader(rewindableStream, options, CompressionType.None);
        }

        #endregion Open

        protected override IEnumerable<TarEntry> GetEntries(Stream stream)
        {
            return TarEntry.GetEntries(StreamingMode.Streaming, stream, compressionType, Options.ArchiveEncoding);
        }
    }
}
using System.Collections.Generic;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Zip;
using SharpCompress.Common.Zip.Headers;

namespace SharpCompress.Readers.Zip
{
    /// <summary>
    /// Forward-only zip reader that parses local-entry headers as they
    /// stream by, without requiring a seekable source.
    /// </summary>
    public class ZipReader : AbstractReader<ZipEntry, ZipVolume>
    {
        private readonly StreamingZipHeaderFactory _headerFactory;

        private ZipReader(Stream stream, ReaderOptions options)
            : base(options, ArchiveType.Zip)
        {
            Volume = new ZipVolume(stream, options);
            _headerFactory = new StreamingZipHeaderFactory(options.Password, options.ArchiveEncoding);
        }

        public override ZipVolume Volume { get; }

        #region Open

        /// <summary>
        /// Opens a ZipReader for Non-seeking usage with a single volume.
        /// </summary>
        public static ZipReader Open(Stream stream, ReaderOptions options = null)
        {
            stream.CheckNotNull("stream");
            return new ZipReader(stream, options ?? new ReaderOptions());
        }

        #endregion Open

        protected override IEnumerable<ZipEntry> GetEntries(Stream stream)
        {
            foreach (ZipHeader header in _headerFactory.ReadStreamHeader(stream))
            {
                if (header == null)
                {
                    continue;
                }
                switch (header.ZipHeaderType)
                {
                    case ZipHeaderType.LocalEntry:
                        yield return new ZipEntry(new StreamingZipFilePart(header as LocalEntryHeader, stream));
                        break;
                    case ZipHeaderType.DirectoryEnd:
                        // Central directory reached: no more file entries follow.
                        yield break;
                }
            }
        }
    }
}
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Reflection.Emit;
using System.Runtime.InteropServices;
#if NETCORE
using SharpCompress.Buffers;
#endif
using SharpCompress.Readers;

namespace SharpCompress
{
    /// <summary>
    /// Grab-bag of internal helpers shared across the library: array fills,
    /// stream transfer/skip, DOS/Unix timestamp conversion and argument guards.
    /// </summary>
    internal static class Utility
    {
        public static ReadOnlyCollection<T> ToReadOnly<T>(this IEnumerable<T> items)
        {
            return new ReadOnlyCollection<T>(items.ToList());
        }

        /// <summary>
        /// Performs an unsigned (logical) right shift on a signed 32-bit value.
        /// </summary>
        public static int URShift(int number, int bits)
        {
            if (number >= 0)
            {
                return number >> bits;
            }
            // Arithmetic shift smears the sign bit in; adding 2 << ~bits clears it.
            return (number >> bits) + (2 << ~bits);
        }

        /// <summary>
        /// Performs an unsigned (logical) right shift on a signed 64-bit value.
        /// </summary>
        public static long URShift(long number, int bits)
        {
            if (number >= 0)
            {
                return number >> bits;
            }
            return (number >> bits) + (2L << ~bits);
        }

        /// <summary>
        /// Fills array[fromindex..toindex) with val.
        /// NOTE: throws NullReferenceException for an empty array (historical behavior,
        /// preserved for compatibility with existing callers).
        /// </summary>
        public static void Fill<T>(T[] array, int fromindex, int toindex, T val) where T : struct
        {
            if (array.Length == 0)
            {
                throw new NullReferenceException();
            }
            if (fromindex > toindex)
            {
                throw new ArgumentException();
            }
            if (fromindex < 0 || array.Length < toindex)
            {
                throw new IndexOutOfRangeException();
            }
            // The original initialized via '(fromindex > 0) ? fromindex-- : fromindex',
            // which evaluates to plain 'fromindex' (the post-decrement is dead).
            for (int i = fromindex; i < toindex; i++)
            {
                array[i] = val;
            }
        }

#if NET45
        // Super fast memset, up to 40x faster than a for loop on large arrays.
        // See https://stackoverflow.com/questions/1897555/what-is-the-equivalent-of-memset-in-c
        private static readonly Action<IntPtr, byte, uint> MemsetDelegate = CreateMemsetDelegate();

        private static Action<IntPtr, byte, uint> CreateMemsetDelegate()
        {
            var dynamicMethod = new DynamicMethod(
                "Memset",
                MethodAttributes.Public | MethodAttributes.Static,
                CallingConventions.Standard,
                null,
                new[] { typeof(IntPtr), typeof(byte), typeof(uint) },
                typeof(Utility),
                true);
            var generator = dynamicMethod.GetILGenerator();
            generator.Emit(OpCodes.Ldarg_0);
            generator.Emit(OpCodes.Ldarg_1);
            generator.Emit(OpCodes.Ldarg_2);
            generator.Emit(OpCodes.Initblk);
            generator.Emit(OpCodes.Ret);
            return (Action<IntPtr, byte, uint>)dynamicMethod.CreateDelegate(typeof(Action<IntPtr, byte, uint>));
        }

        public static void Memset(byte[] array, byte what, int length)
        {
            var gcHandle = GCHandle.Alloc(array, GCHandleType.Pinned);
            MemsetDelegate(gcHandle.AddrOfPinnedObject(), what, (uint)length);
            gcHandle.Free();
        }
#else
        public static void Memset(byte[] array, byte what, int length)
        {
            for (var i = 0; i < length; i++)
            {
                array[i] = what;
            }
        }
#endif

        public static void Memset<T>(T[] array, T what, int length)
        {
            for (var i = 0; i < length; i++)
            {
                array[i] = what;
            }
        }

        public static void FillFast<T>(T[] array, T val) where T : struct
        {
            for (int i = 0; i < array.Length; i++)
            {
                array[i] = val;
            }
        }

        public static void FillFast<T>(T[] array, int start, int length, T val) where T : struct
        {
            int end = start + length;
            for (int i = start; i < end; i++)
            {
                array[i] = val;
            }
        }

        /// <summary>Fills the whole array with val.</summary>
        public static void Fill<T>(T[] array, T val) where T : struct
        {
            Fill(array, 0, array.Length, val);
        }

        /// <summary>
        /// Grows the byte list (zero-padded) to count items.
        /// NOTE(review): the shrink branch copies the whole list into a smaller
        /// array, which List.CopyTo rejects — callers appear to only grow; confirm
        /// before relying on truncation.
        /// </summary>
        public static void SetSize(this List<byte> list, int count)
        {
            if (count > list.Count)
            {
                for (int i = list.Count; i < count; i++)
                {
                    list.Add(0x0);
                }
            }
            else
            {
                byte[] temp = new byte[count];
                list.CopyTo(temp, 0);
                list.Clear();
                list.AddRange(temp);
            }
        }

        public static void AddRange<T>(this ICollection<T> destination, IEnumerable<T> source)
        {
            foreach (T item in source)
            {
                destination.Add(item);
            }
        }

        public static void ForEach<T>(this IEnumerable<T> items, Action<T> action)
        {
            foreach (T item in items)
            {
                action(item);
            }
        }

        /// <summary>Array.Copy taking long indices, range-checked down to int.</summary>
        public static void Copy(Array sourceArray, long sourceIndex, Array destinationArray, long destinationIndex, long length)
        {
            if (sourceIndex > Int32.MaxValue || sourceIndex < Int32.MinValue)
            {
                throw new ArgumentOutOfRangeException();
            }
            if (destinationIndex > Int32.MaxValue || destinationIndex < Int32.MinValue)
            {
                throw new ArgumentOutOfRangeException();
            }
            if (length > Int32.MaxValue || length < Int32.MinValue)
            {
                throw new ArgumentOutOfRangeException();
            }
            Array.Copy(sourceArray, (int)sourceIndex, destinationArray, (int)destinationIndex, (int)length);
        }

        /// <summary>Wraps a single item as a one-element sequence.</summary>
        public static IEnumerable<T> AsEnumerable<T>(this T item)
        {
            yield return item;
        }

        public static void CheckNotNull(this object obj, string name)
        {
            if (obj == null)
            {
                throw new ArgumentNullException(name);
            }
        }

        public static void CheckNotNullOrEmpty(this string obj, string name)
        {
            obj.CheckNotNull(name);
            if (obj.Length == 0)
            {
                throw new ArgumentException("String is empty.");
            }
        }

        /// <summary>
        /// Advances the stream by advanceAmount bytes: seeks when possible,
        /// otherwise reads into a scratch buffer and discards.
        /// </summary>
        public static void Skip(this Stream source, long advanceAmount)
        {
            if (source.CanSeek)
            {
                source.Position += advanceAmount;
                return;
            }

            byte[] buffer = GetTransferByteArray();
            try
            {
                while (true)
                {
                    int requestCount = buffer.Length;
                    if (requestCount > advanceAmount)
                    {
                        requestCount = (int)advanceAmount;
                    }
                    int read = source.Read(buffer, 0, requestCount);
                    if (read <= 0)
                    {
                        break;
                    }
                    advanceAmount -= read;
                    if (advanceAmount == 0)
                    {
                        break;
                    }
                }
            }
            finally
            {
#if NETCORE
                ArrayPool<byte>.Shared.Return(buffer);
#endif
            }
        }

        /// <summary>Reads and discards the remainder of the stream.</summary>
        public static void Skip(this Stream source)
        {
            byte[] buffer = GetTransferByteArray();
            try
            {
                while (source.Read(buffer, 0, buffer.Length) == buffer.Length)
                {
                }
            }
            finally
            {
#if NETCORE
                ArrayPool<byte>.Shared.Return(buffer);
#endif
            }
        }

        /// <summary>
        /// Converts MS-DOS packed date/time fields to a local DateTime, falling
        /// back to 1980-01-01 / midnight for sentinel (0xFFFF) or invalid values.
        /// </summary>
        public static DateTime DosDateToDateTime(UInt16 iDate, UInt16 iTime)
        {
            int year = iDate / 512 + 1980;
            int month = iDate % 512 / 32;
            int day = iDate % 512 % 32;
            int hour = iTime / 2048;
            int minute = iTime % 2048 / 32;
            int second = iTime % 2048 % 32 * 2; // DOS stores seconds divided by 2

            if (iDate == UInt16.MaxValue || month == 0 || day == 0)
            {
                year = 1980;
                month = 1;
                day = 1;
            }
            if (iTime == UInt16.MaxValue)
            {
                hour = minute = second = 0;
            }

            try
            {
                return new DateTime(year, month, day, hour, minute, second, DateTimeKind.Local);
            }
            catch
            {
                // Out-of-range component: return the default DateTime as before.
                return new DateTime();
            }
        }

        /// <summary>Packs a nullable DateTime into DOS date/time format (0 when null).</summary>
        public static uint DateTimeToDosTime(this DateTime? dateTime)
        {
            if (dateTime == null)
            {
                return 0;
            }

            var local = dateTime.Value.ToLocalTime();
            return (uint)(
                (local.Second / 2) | (local.Minute << 5) | (local.Hour << 11) |
                (local.Day << 16) | (local.Month << 21) |
                ((local.Year - 1980) << 25));
        }

        public static DateTime DosDateToDateTime(UInt32 iTime)
        {
            // High word is the date, low word the time.
            return DosDateToDateTime((UInt16)(iTime / 65536), (UInt16)(iTime % 65536));
        }

        /// <summary>
        /// Convert Unix time value to a DateTime object.
        /// </summary>
        public static DateTime UnixTimeToDateTime(long unixtime)
        {
            DateTime epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
            return epoch.AddSeconds(unixtime);
        }

        /// <summary>Copies source to destination; returns the number of bytes moved.</summary>
        public static long TransferTo(this Stream source, Stream destination)
        {
            byte[] array = GetTransferByteArray();
            try
            {
                long total = 0;
                int count;
                while (ReadTransferBlock(source, array, out count))
                {
                    total += count;
                    destination.Write(array, 0, count);
                }
                return total;
            }
            finally
            {
#if NETCORE
                ArrayPool<byte>.Shared.Return(array);
#endif
            }
        }

        /// <summary>
        /// Copies source to destination, reporting per-block progress to the listener.
        /// </summary>
        public static long TransferTo(this Stream source, Stream destination, Common.Entry entry, IReaderExtractionListener readerExtractionListener)
        {
            byte[] array = GetTransferByteArray();
            try
            {
                long total = 0;
                var iterations = 0;
                int count;
                while (ReadTransferBlock(source, array, out count))
                {
                    total += count;
                    destination.Write(array, 0, count);
                    iterations++;
                    readerExtractionListener.FireEntryExtractionProgress(entry, total, iterations);
                }
                return total;
            }
            finally
            {
#if NETCORE
                ArrayPool<byte>.Shared.Return(array);
#endif
            }
        }

        private static bool ReadTransferBlock(Stream source, byte[] array, out int count)
        {
            return (count = source.Read(array, 0, array.Length)) != 0;
        }

        private static byte[] GetTransferByteArray()
        {
#if NETCORE
            return ArrayPool<byte>.Shared.Rent(81920);
#else
            return new byte[81920];
#endif
        }

        /// <summary>Reads until the buffer is full or the stream ends; true when filled.</summary>
        public static bool ReadFully(this Stream stream, byte[] buffer)
        {
            int total = 0;
            int read;
            while ((read = stream.Read(buffer, total, buffer.Length - total)) > 0)
            {
                total += read;
                if (total >= buffer.Length)
                {
                    return true;
                }
            }
            return total >= buffer.Length;
        }

        /// <summary>Replaces NUL characters with spaces and trims the result.</summary>
        public static string TrimNulls(this string source)
        {
            return source.Replace('\0', ' ').Trim();
        }

        public static bool BinaryEquals(this byte[] source, byte[] target)
        {
            if (source.Length != target.Length)
            {
                return false;
            }
            for (int i = 0; i < source.Length; ++i)
            {
                if (source[i] != target[i])
                {
                    return false;
                }
            }
            return true;
        }
    }
}
using System;
using System.IO;
using SharpCompress.Common;

namespace SharpCompress.Writers
{
    /// <summary>
    /// Base class for archive writers: owns the output stream and the shared
    /// dispose/finalize plumbing.
    /// </summary>
    public abstract class AbstractWriter : IWriter
    {
        private bool _isDisposed;

        protected AbstractWriter(ArchiveType type, WriterOptions writerOptions)
        {
            WriterType = type;
            WriterOptions = writerOptions;
        }

        protected Stream OutputStream { get; private set; }

        public ArchiveType WriterType { get; }

        protected WriterOptions WriterOptions { get; }

        // Called by subclasses once they have wrapped the destination stream.
        // (Name keeps the historical "Initalize" spelling — subclasses call it.)
        protected void InitalizeStream(Stream stream)
        {
            OutputStream = stream;
        }

        public abstract void Write(string filename, Stream source, DateTime? modificationTime);

        protected virtual void Dispose(bool isDisposing)
        {
            if (isDisposing)
            {
                OutputStream.Dispose();
            }
        }

        public void Dispose()
        {
            if (_isDisposed)
            {
                return;
            }
            GC.SuppressFinalize(this);
            Dispose(true);
            _isDisposed = true;
        }

        ~AbstractWriter()
        {
            if (!_isDisposed)
            {
                Dispose(false);
                _isDisposed = true;
            }
        }
    }
}
using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Compressors;
using SharpCompress.Compressors.Deflate;
using SharpCompress.IO;

namespace SharpCompress.Writers.GZip
{
    /// <summary>
    /// Writer that produces a gzip member. A gzip file holds exactly one
    /// stream, so Write may be called at most once.
    /// </summary>
    public class GZipWriter : AbstractWriter
    {
        private bool _wroteToStream;

        public GZipWriter(Stream destination, GZipWriterOptions options = null)
            : base(ArchiveType.GZip, options ?? new GZipWriterOptions())
        {
            if (WriterOptions.LeaveStreamOpen)
            {
                destination = new NonDisposingStream(destination);
            }
            InitalizeStream(new GZipStream(destination, CompressionMode.Compress,
                                           options?.CompressionLevel ?? CompressionLevel.Default,
                                           WriterOptions.ArchiveEncoding.GetEncoding()));
        }

        protected override void Dispose(bool isDisposing)
        {
            if (isDisposing)
            {
                // Dispose here to finish the GZip trailer; GZipStream won't
                // close the underlying stream.
                OutputStream.Dispose();
            }
            base.Dispose(isDisposing);
        }

        public override void Write(string filename, Stream source, DateTime? modificationTime)
        {
            if (_wroteToStream)
            {
                throw new ArgumentException("Can only write a single stream to a GZip file.");
            }
            GZipStream gzip = OutputStream as GZipStream;
            gzip.FileName = filename;
            gzip.LastModified = modificationTime;
            source.TransferTo(gzip);
            _wroteToStream = true;
        }
    }

    /// <summary>Options for GZipWriter, adding a deflate compression level.</summary>
    public class GZipWriterOptions : WriterOptions
    {
        public GZipWriterOptions()
            : base(CompressionType.GZip)
        {
        }

        internal GZipWriterOptions(WriterOptions options)
            : base(options.CompressionType)
        {
            LeaveStreamOpen = options.LeaveStreamOpen;
            ArchiveEncoding = options.ArchiveEncoding;

            if (options is GZipWriterOptions gzipOptions)
            {
                CompressionLevel = gzipOptions.CompressionLevel;
            }
        }

        public CompressionLevel CompressionLevel { get; set; } = CompressionLevel.Default;
    }
}
using System;
using System.IO;
using System.Linq;
using System.Linq.Expressions;
using SharpCompress.Common;

namespace SharpCompress.Writers
{
    /// <summary>A disposable archive writer for a particular archive type.</summary>
    public interface IWriter : IDisposable
    {
        ArchiveType WriterType { get; }
        void Write(string filename, Stream source, DateTime? modificationTime);
    }

    /// <summary>Convenience overloads for writing streams, files and directory trees.</summary>
    public static class IWriterExtensions
    {
        public static void Write(this IWriter writer, string entryPath, Stream source)
        {
            writer.Write(entryPath, source, null);
        }

#if !NO_FILE
        public static void Write(this IWriter writer, string entryPath, FileInfo source)
        {
            if (!source.Exists)
            {
                throw new ArgumentException("Source does not exist: " + source.FullName);
            }
            using (var stream = source.OpenRead())
            {
                writer.Write(entryPath, stream, source.LastWriteTime);
            }
        }

        public static void Write(this IWriter writer, string entryPath, string source)
        {
            writer.Write(entryPath, new FileInfo(source));
        }

        public static void WriteAll(this IWriter writer, string directory, string searchPattern = "*", SearchOption option = SearchOption.TopDirectoryOnly)
        {
            writer.WriteAll(directory, searchPattern, null, option);
        }

        public static void WriteAll(this IWriter writer, string directory, string searchPattern = "*",
                                    Expression<Func<string, bool>> fileSearchFunc = null,
                                    SearchOption option = SearchOption.TopDirectoryOnly)
        {
            if (!Directory.Exists(directory))
            {
                throw new ArgumentException("Directory does not exist: " + directory);
            }

            if (fileSearchFunc == null)
            {
                fileSearchFunc = n => true;
            }
#if NET35
            foreach (var file in Directory.GetDirectories(directory, searchPattern, option).Where(fileSearchFunc.Compile()))
#else
            foreach (var file in Directory.EnumerateFiles(directory, searchPattern, option).Where(fileSearchFunc.Compile()))
#endif
            {
                // NOTE(review): when 'directory' lacks a trailing separator the
                // entry path begins with one — existing callers appear to rely
                // on this exact behavior, so it is preserved.
                writer.Write(file.Substring(directory.Length), file);
            }
        }
#endif
    }
}
using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.Compressors;
using SharpCompress.Compressors.BZip2;
using SharpCompress.Compressors.Deflate;
using SharpCompress.Compressors.LZMA;
using SharpCompress.IO;

namespace SharpCompress.Writers.Tar
{
    /// <summary>
    /// Writes tar archives, optionally compressing the whole archive with
    /// gzip, bzip2 or lzip.
    /// </summary>
    public class TarWriter : AbstractWriter
    {
        private readonly bool finalizeArchiveOnClose;

        public TarWriter(Stream destination, TarWriterOptions options)
            : base(ArchiveType.Tar, options)
        {
            finalizeArchiveOnClose = options.FinalizeArchiveOnClose;

            if (!destination.CanWrite)
            {
                throw new ArgumentException("Tars require writable streams.");
            }
            if (WriterOptions.LeaveStreamOpen)
            {
                destination = new NonDisposingStream(destination);
            }
            switch (options.CompressionType)
            {
                case CompressionType.None:
                    break;
                case CompressionType.BZip2:
                    destination = new BZip2Stream(destination, CompressionMode.Compress, false);
                    break;
                case CompressionType.GZip:
                    destination = new GZipStream(destination, CompressionMode.Compress);
                    break;
                case CompressionType.LZip:
                    destination = new LZipStream(destination, CompressionMode.Compress);
                    break;
                default:
                    throw new InvalidFormatException("Tar does not support compression: " + options.CompressionType);
            }
            InitalizeStream(destination);
        }

        public override void Write(string filename, Stream source, DateTime? modificationTime)
        {
            Write(filename, source, modificationTime, null);
        }

        // Tar entry names use forward slashes and no drive prefix or
        // leading/trailing separators.
        private string NormalizeFilename(string filename)
        {
            filename = filename.Replace('\\', '/');

            int pos = filename.IndexOf(':');
            if (pos >= 0)
            {
                filename = filename.Remove(0, pos + 1);
            }

            return filename.Trim('/');
        }

        /// <summary>
        /// Writes one entry. When source is not seekable a size must be supplied;
        /// the header is emitted first, then the data padded to a 512-byte block.
        /// </summary>
        public void Write(string filename, Stream source, DateTime? modificationTime, long? size)
        {
            if (!source.CanSeek && size == null)
            {
                throw new ArgumentException("Seekable stream is required if no size is given.");
            }

            long realSize = size ?? source.Length;

            TarHeader header = new TarHeader(WriterOptions.ArchiveEncoding);
            header.LastModifiedTime = modificationTime ?? TarHeader.EPOCH;
            header.Name = NormalizeFilename(filename);
            header.Size = realSize;
            header.Write(OutputStream);

            // Pad based on what was actually transferred, not the declared size.
            size = source.TransferTo(OutputStream);
            PadTo512(size.Value, false);
        }

        /// <summary>
        /// Pads the output to the next 512-byte boundary. When forceZeros is set a
        /// full zero block is written even if already aligned (used for the two
        /// terminating blocks of the archive).
        /// </summary>
        private void PadTo512(long size, bool forceZeros)
        {
            // BUGFIX: take the modulo in 64-bit before narrowing. The original
            // '(int)size % 512' truncated to int first, which can yield a negative
            // remainder — and therefore wrong padding — for entries >= 2 GiB.
            int zeros = (int)(size % 512);
            if (zeros == 0 && !forceZeros)
            {
                return;
            }
            zeros = 512 - zeros;
            OutputStream.Write(new byte[zeros], 0, zeros);
        }

        protected override void Dispose(bool isDisposing)
        {
            if (isDisposing)
            {
                if (finalizeArchiveOnClose)
                {
                    // A tar archive ends with two zeroed 512-byte blocks.
                    PadTo512(0, true);
                    PadTo512(0, true);
                }
                switch (OutputStream)
                {
                    case BZip2Stream b:
                        b.Finish();
                        break;
                    case LZipStream l:
                        l.Finish();
                        break;
                }
            }
            base.Dispose(isDisposing);
        }
    }
}
using System;
using System.IO;
using SharpCompress.Common;
using SharpCompress.Writers.GZip;
using SharpCompress.Writers.Tar;
using SharpCompress.Writers.Zip;

namespace SharpCompress.Writers.Tar
{
    /// <summary>Options specific to tar writers.</summary>
    public class TarWriterOptions : WriterOptions
    {
        /// <summary>
        /// Indicates if archive should be finalized (by 2 empty blocks) on close.
        /// </summary>
        public bool FinalizeArchiveOnClose { get; }

        public TarWriterOptions(CompressionType compressionType, bool finalizeArchiveOnClose)
            : base(compressionType)
        {
            FinalizeArchiveOnClose = finalizeArchiveOnClose;
        }

        // NOTE(review): this copy constructor carries over only CompressionType
        // (LeaveStreamOpen/ArchiveEncoding are left at defaults) and always
        // finalizes on close — preserved as-is.
        internal TarWriterOptions(WriterOptions options)
            : this(options.CompressionType, true)
        {
        }
    }
}

namespace SharpCompress.Writers
{
    /// <summary>Creates the appropriate IWriter for an archive type.</summary>
    public static class WriterFactory
    {
        public static IWriter Open(Stream stream, ArchiveType archiveType, WriterOptions writerOptions)
        {
            switch (archiveType)
            {
                case ArchiveType.GZip:
                    if (writerOptions.CompressionType != CompressionType.GZip)
                    {
                        throw new InvalidFormatException("GZip archives only support GZip compression type.");
                    }
                    return new GZipWriter(stream, new GZipWriterOptions(writerOptions));
                case ArchiveType.Zip:
                    return new ZipWriter(stream, new ZipWriterOptions(writerOptions));
                case ArchiveType.Tar:
                    return new TarWriter(stream, new TarWriterOptions(writerOptions));
                default:
                    throw new NotSupportedException("Archive Type does not have a Writer: " + archiveType);
            }
        }
    }
}
using System;
using System.IO;
using System.Text;
using SharpCompress.Common;
using SharpCompress.Common.Zip;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.Converters;

namespace SharpCompress.Writers
{
    /// <summary>Base options for all writers: a compression type plus OptionsBase settings.</summary>
    public class WriterOptions : OptionsBase
    {
        public WriterOptions(CompressionType compressionType)
        {
            CompressionType = compressionType;
        }

        public CompressionType CompressionType { get; set; }

        // Lets callers pass a bare CompressionType where WriterOptions is expected.
        public static implicit operator WriterOptions(CompressionType compressionType)
        {
            return new WriterOptions(compressionType);
        }
    }
}

namespace SharpCompress.Writers.Zip
{
    /// <summary>
    /// One record of the zip central directory, serialized when the archive
    /// is closed.
    /// </summary>
    internal class ZipCentralDirectoryEntry
    {
        private readonly ZipCompressionMethod compression;
        private readonly string fileName;
        private readonly ArchiveEncoding archiveEncoding;

        public ZipCentralDirectoryEntry(ZipCompressionMethod compression, string fileName, ulong headerOffset, ArchiveEncoding archiveEncoding)
        {
            this.compression = compression;
            this.fileName = fileName;
            HeaderOffset = headerOffset;
            this.archiveEncoding = archiveEncoding;
        }

        internal DateTime? ModificationTime { get; set; }
        internal string Comment { get; set; }
        internal uint Crc { get; set; }
        internal ulong Compressed { get; set; }
        internal ulong Decompressed { get; set; }
        internal ushort Zip64HeaderOffset { get; set; }
        internal ulong HeaderOffset { get; }

        /// <summary>
        /// Serializes this central directory record to outputStream and returns
        /// the number of bytes written.
        /// </summary>
        internal uint Write(Stream outputStream)
        {
            byte[] encodedFilename = archiveEncoding.Encode(fileName);
            byte[] encodedComment = archiveEncoding.Encode(Comment);

            bool zip64Stream = Compressed >= uint.MaxValue || Decompressed >= uint.MaxValue;
            bool zip64 = zip64Stream || HeaderOffset >= uint.MaxValue || Zip64HeaderOffset != 0;

            // Fields that overflow 32 bits are written as 0xFFFFFFFF and the real
            // values go into the zip64 extra field below.
            uint compressedValue = zip64 ? uint.MaxValue : (uint)Compressed;
            uint decompressedValue = zip64 ? uint.MaxValue : (uint)Decompressed;
            uint headerOffsetValue = zip64 ? uint.MaxValue : (uint)HeaderOffset;
            int extralength = zip64 ? (2 + 2 + 8 + 8 + 8 + 4) : 0;
            byte version = (byte)(zip64 ? 45 : 20); // Version 20 required for deflate/encryption, 45 for zip64

            HeaderFlags flags = Equals(archiveEncoding.GetEncoding(), Encoding.UTF8) ? HeaderFlags.Efs : HeaderFlags.None;
            if (!outputStream.CanSeek)
            {
                // Cannot use data descriptors with zip64:
                // https://blogs.oracle.com/xuemingshen/entry/is_zipinput_outputstream_handling_of
                // ZipWritingStream already guards against oversized streams, so this
                // extra check is redundant but kept to simplify changing the code
                // once the zip64 post-data issue is resolved.
                if (!zip64Stream)
                {
                    flags |= HeaderFlags.UsePostDataDescriptor;
                }
                if (compression == ZipCompressionMethod.LZMA)
                {
                    flags |= HeaderFlags.Bit1; // eos marker
                }
            }

            // Constant signature, then version made by, then version to extract.
            outputStream.Write(new byte[] { 80, 75, 1, 2, version, 0, version, 0 }, 0, 8);
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)flags), 0, 2);
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)compression), 0, 2);            // compression method
            outputStream.Write(DataConverter.LittleEndian.GetBytes(ModificationTime.DateTimeToDosTime()), 0, 4); // DOS date/time
            outputStream.Write(DataConverter.LittleEndian.GetBytes(Crc), 0, 4);                            // file CRC
            outputStream.Write(DataConverter.LittleEndian.GetBytes(compressedValue), 0, 4);                // compressed size
            outputStream.Write(DataConverter.LittleEndian.GetBytes(decompressedValue), 0, 4);              // uncompressed size
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)encodedFilename.Length), 0, 2); // filename length
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)extralength), 0, 2);            // extra field length
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)encodedComment.Length), 0, 2);  // comment length
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0), 0, 2);                      // disk = 0
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0), 0, 2);                      // file type: binary
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0), 0, 2);                      // internal file attributes
            outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0x8100), 0, 2);                 // external attributes (normal/readable)
            outputStream.Write(DataConverter.LittleEndian.GetBytes(headerOffsetValue), 0, 4);              // offset of local header

            outputStream.Write(encodedFilename, 0, encodedFilename.Length);

            if (zip64)
            {
                // Zip64 extended information extra field.
                outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0x0001), 0, 2);
                outputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)(extralength - 4)), 0, 2);
                outputStream.Write(DataConverter.LittleEndian.GetBytes(Decompressed), 0, 8);
                outputStream.Write(DataConverter.LittleEndian.GetBytes(Compressed), 0, 8);
                outputStream.Write(DataConverter.LittleEndian.GetBytes(HeaderOffset), 0, 8);
                outputStream.Write(DataConverter.LittleEndian.GetBytes(0), 0, 4); // VolumeNumber = 0
            }

            outputStream.Write(encodedComment, 0, encodedComment.Length);

            return (uint)(8 + 2 + 2 + 4 + 4 + 4 + 4 + 2 + 2 + 2
                          + 2 + 2 + 2 + 2 + 4 + encodedFilename.Length + extralength + encodedComment.Length);
        }
    }
}
// Central-directory entries accumulated as streams are written; flushed in Dispose.
private readonly List<ZipCentralDirectoryEntry> entries = new List<ZipCentralDirectoryEntry>();
private readonly string zipComment;
private long streamPosition;
private PpmdProperties ppmdProps;
private readonly bool isZip64;

/// <summary>
/// Creates a zip writer over <paramref name="destination"/>.
/// Non-seekable destinations are supported via post-data descriptors.
/// </summary>
/// <param name="destination">Target stream.</param>
/// <param name="zipWriterOptions">Archive-wide options (compression, comment, zip64, encoding).</param>
public ZipWriter(Stream destination, ZipWriterOptions zipWriterOptions)
    : base(ArchiveType.Zip, zipWriterOptions)
{
    zipComment = zipWriterOptions.ArchiveComment ?? string.Empty;
    isZip64 = zipWriterOptions.UseZip64;
    if (destination.CanSeek)
    {
        // Allow appending to a stream that is already positioned past 0.
        streamPosition = destination.Position;
    }

    compressionType = zipWriterOptions.CompressionType;
    compressionLevel = zipWriterOptions.DeflateCompressionLevel;

    if (WriterOptions.LeaveStreamOpen)
    {
        destination = new NonDisposingStream(destination);
    }
    InitalizeStream(destination);
}

// Lazily created PPMd encoder properties, shared by every PPMd entry in this archive.
private PpmdProperties PpmdProperties
{
    get
    {
        if (ppmdProps == null)
        {
            ppmdProps = new PpmdProperties();
        }
        return ppmdProps;
    }
}

/// <summary>
/// Writes the central directory followed by the end-of-central-directory record(s).
/// </summary>
protected override void Dispose(bool isDisposing)
{
    if (isDisposing)
    {
        ulong size = 0;
        foreach (ZipCentralDirectoryEntry entry in entries)
        {
            size += entry.Write(OutputStream);
        }
        WriteEndRecord(size);
    }
    base.Dispose(isDisposing);
}

// Maps the writer-level CompressionType to the zip-specific method id.
private static ZipCompressionMethod ToZipCompressionMethod(CompressionType compressionType)
{
    switch (compressionType)
    {
        case CompressionType.None:
            return ZipCompressionMethod.None;
        case CompressionType.Deflate:
            return ZipCompressionMethod.Deflate;
        case CompressionType.BZip2:
            return ZipCompressionMethod.BZip2;
        case CompressionType.LZMA:
            return ZipCompressionMethod.LZMA;
        case CompressionType.PPMd:
            return ZipCompressionMethod.PPMd;
        default:
            throw new InvalidFormatException("Invalid compression method: " + compressionType);
    }
}

public override void Write(string entryPath, Stream source, DateTime? modificationTime)
{
    Write(entryPath, source, new ZipWriterEntryOptions()
    {
        ModificationDateTime = modificationTime
    });
}

public void Write(string entryPath, Stream source, ZipWriterEntryOptions zipWriterEntryOptions)
{
    using (Stream output = WriteToStream(entryPath, zipWriterEntryOptions))
    {
        source.TransferTo(output);
    }
}

/// <summary>
/// Writes the local header for a new entry and returns a stream the caller
/// writes the entry data into. Disposing the returned stream finalizes the entry.
/// </summary>
public Stream WriteToStream(string entryPath, ZipWriterEntryOptions options)
{
    var compression = ToZipCompressionMethod(options.CompressionType ?? compressionType);

    entryPath = NormalizeFilename(entryPath);
    options.ModificationDateTime = options.ModificationDateTime ?? DateTime.Now;
    options.EntryComment = options.EntryComment ?? string.Empty;
    var entry = new ZipCentralDirectoryEntry(compression, entryPath, (ulong)streamPosition, WriterOptions.ArchiveEncoding)
    {
        Comment = options.EntryComment,
        ModificationTime = options.ModificationDateTime
    };

    // Use the archive default setting for zip64 and allow per-entry overrides.
    var useZip64 = isZip64;
    if (options.EnableZip64.HasValue)
    {
        useZip64 = options.EnableZip64.Value;
    }

    var headersize = (uint)WriteHeader(entryPath, options, entry, useZip64);
    streamPosition += headersize;
    return new ZipWritingStream(this, OutputStream, entry, compression,
                                options.DeflateCompressionLevel ?? compressionLevel);
}

// Zip entry names use forward slashes, with no drive prefix and no leading slash.
private string NormalizeFilename(string filename)
{
    filename = filename.Replace('\\', '/');

    int pos = filename.IndexOf(':');
    if (pos >= 0)
    {
        filename = filename.Remove(0, pos + 1);
    }

    return filename.Trim('/');
}

/// <summary>
/// Emits the local file header and returns its length in bytes.
/// When zip64 is enabled on a seekable stream, extra-field space is reserved
/// and its offset is recorded on <paramref name="entry"/> for later back-patching.
/// </summary>
private int WriteHeader(string filename, ZipWriterEntryOptions zipWriterEntryOptions, ZipCentralDirectoryEntry entry, bool useZip64)
{
    // We err on the side of caution until the zip specification clarifies how to support this
    if (!OutputStream.CanSeek && useZip64)
    {
        throw new NotSupportedException("Zip64 extensions are not supported on non-seekable streams");
    }

    var explicitZipCompressionInfo = ToZipCompressionMethod(zipWriterEntryOptions.CompressionType ?? compressionType);
    byte[] encodedFilename = WriterOptions.ArchiveEncoding.Encode(filename);

    OutputStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.ENTRY_HEADER_BYTES), 0, 4);
    if (explicitZipCompressionInfo == ZipCompressionMethod.Deflate)
    {
        if (OutputStream.CanSeek && useZip64)
        {
            OutputStream.Write(new byte[] { 45, 0 }, 0, 2); // smallest allowed version for zip64
        }
        else
        {
            OutputStream.Write(new byte[] { 20, 0 }, 0, 2); // older version which is more compatible
        }
    }
    else
    {
        OutputStream.Write(new byte[] { 63, 0 }, 0, 2); // version says we used PPMd or LZMA
    }

    HeaderFlags flags = Equals(WriterOptions.ArchiveEncoding.GetEncoding(), Encoding.UTF8) ? HeaderFlags.Efs : 0;
    if (!OutputStream.CanSeek)
    {
        // Cannot back-patch sizes, so announce a post-data descriptor instead.
        flags |= HeaderFlags.UsePostDataDescriptor;

        if (explicitZipCompressionInfo == ZipCompressionMethod.LZMA)
        {
            flags |= HeaderFlags.Bit1; // eos marker
        }
    }
    OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)flags), 0, 2);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)explicitZipCompressionInfo), 0, 2); // zipping method
    OutputStream.Write(DataConverter.LittleEndian.GetBytes(zipWriterEntryOptions.ModificationDateTime.DateTimeToDosTime()), 0, 4); // zipping date and time

    OutputStream.Write(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0, 12); // unused CRC, un/compressed size, updated later

    OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)encodedFilename.Length), 0, 2); // filename length

    var extralength = 0;
    if (OutputStream.CanSeek && useZip64)
    {
        extralength = 2 + 2 + 8 + 8; // zip64 extra field: id, size, 2 x 64-bit length
    }

    OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)extralength), 0, 2); // extra length
    OutputStream.Write(encodedFilename, 0, encodedFilename.Length);

    if (extralength != 0)
    {
        OutputStream.Write(new byte[extralength], 0, extralength); // reserve space for zip64 data
        entry.Zip64HeaderOffset = (ushort)(6 + 2 + 2 + 4 + 12 + 2 + 2 + encodedFilename.Length);
    }

    return 6 + 2 + 2 + 4 + 12 + 2 + 2 + encodedFilename.Length + extralength;
}

// Writes the CRC / compressed / uncompressed trio used both when back-patching
// the local header and in the post-data descriptor.
private void WriteFooter(uint crc, uint compressed, uint uncompressed)
{
    OutputStream.Write(DataConverter.LittleEndian.GetBytes(crc), 0, 4);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes(compressed), 0, 4);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes(uncompressed), 0, 4);
}

/// <summary>
/// Writes the (optional zip64) end-of-central-directory records.
/// <paramref name="size"/> is the byte length of the central directory just written.
/// </summary>
private void WriteEndRecord(ulong size)
{
    byte[] encodedComment = WriterOptions.ArchiveEncoding.Encode(zipComment);
    var zip64 = isZip64 || entries.Count > ushort.MaxValue || streamPosition >= uint.MaxValue || size >= uint.MaxValue;

    var sizevalue = size >= uint.MaxValue ? uint.MaxValue : (uint)size;
    var streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : (uint)streamPosition;

    if (zip64)
    {
        var recordlen = 2 + 2 + 4 + 4 + 8 + 8 + 8 + 8;

        // Write zip64 end of central directory record
        OutputStream.Write(new byte[] { 80, 75, 6, 6 }, 0, 4);
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)recordlen), 0, 8); // Size of zip64 end of central directory record
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0), 0, 2); // Made by
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)45), 0, 2); // Version needed

        OutputStream.Write(DataConverter.LittleEndian.GetBytes((uint)0), 0, 4); // Disk number
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((uint)0), 0, 4); // Central dir disk

        // TODO: entries.Count is int, so max 2^31 files
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)entries.Count), 0, 8); // Entries in this disk
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)entries.Count), 0, 8); // Total entries
        OutputStream.Write(DataConverter.LittleEndian.GetBytes(size), 0, 8); // Central Directory size
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)streamPosition), 0, 8); // Disk offset

        // Write zip64 end of central directory locator
        OutputStream.Write(new byte[] { 80, 75, 6, 7 }, 0, 4);

        // Entry disk: a 4-byte field; was GetBytes(0uL) (an 8-byte buffer truncated to 4)
        OutputStream.Write(DataConverter.LittleEndian.GetBytes(0u), 0, 4);
        OutputStream.Write(DataConverter.LittleEndian.GetBytes((ulong)streamPosition + size), 0, 8); // Offset to the zip64 central directory
        OutputStream.Write(DataConverter.LittleEndian.GetBytes(0u), 0, 4); // Number of disks

        streamPosition += recordlen + (4 + 4 + 8 + 4);

        // Keep pointing at the central-directory start; clamp only if the
        // stream has now grown past the 32-bit range.
        streampositionvalue = streamPosition >= uint.MaxValue ? uint.MaxValue : streampositionvalue;
    }

    // Write normal end of central directory record
    OutputStream.Write(new byte[] { 80, 75, 5, 6, 0, 0, 0, 0 }, 0, 8);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)entries.Count), 0, 2);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)entries.Count), 0, 2);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes(sizevalue), 0, 4);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes(streampositionvalue), 0, 4);
    OutputStream.Write(DataConverter.LittleEndian.GetBytes((ushort)encodedComment.Length), 0, 2);
    OutputStream.Write(encodedComment, 0, encodedComment.Length);
}

#region Nested type: ZipWritingStream

/// <summary>
/// Write-only stream handed to callers; compresses on the fly, tracks CRC and
/// sizes, and finalizes the entry (header back-patch or post-data descriptor)
/// on dispose.
/// </summary>
internal class ZipWritingStream : Stream
{
    private readonly CRC32 crc = new CRC32();
    private readonly ZipCentralDirectoryEntry entry;
    private readonly Stream originalStream;
    private readonly Stream writeStream;
    private readonly ZipWriter writer;
    private readonly ZipCompressionMethod zipCompressionMethod;
    private readonly CompressionLevel compressionLevel;
    private CountingWritableSubStream counting;
    private ulong decompressed;

    // Flag to prevent throwing exceptions on Dispose
    private bool limitsExceeded;
    private bool isDisposed;

    internal ZipWritingStream(ZipWriter writer, Stream originalStream, ZipCentralDirectoryEntry entry,
                              ZipCompressionMethod zipCompressionMethod, CompressionLevel compressionLevel)
    {
        // Fixed: "this.writer = writer;" was assigned twice in the original.
        this.writer = writer;
        this.originalStream = originalStream;
        this.entry = entry;
        this.zipCompressionMethod = zipCompressionMethod;
        this.compressionLevel = compressionLevel;
        writeStream = GetWriteStream(originalStream);
    }

    public override bool CanRead => false;

    public override bool CanSeek => false;

    public override bool CanWrite => true;

    public override long Length => throw new NotSupportedException();

    public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }

    // Wraps the destination in a counting stream plus the requested compressor.
    private Stream GetWriteStream(Stream writeStream)
    {
        counting = new CountingWritableSubStream(writeStream);
        Stream output = counting;
        switch (zipCompressionMethod)
        {
            case ZipCompressionMethod.None:
            {
                return output;
            }
            case ZipCompressionMethod.Deflate:
            {
                return new DeflateStream(counting, CompressionMode.Compress, compressionLevel);
            }
            case ZipCompressionMethod.BZip2:
            {
                return new BZip2Stream(counting, CompressionMode.Compress, false);
            }
            case ZipCompressionMethod.LZMA:
            {
                // LZMA-in-zip preamble: version + properties-size, then the props.
                counting.WriteByte(9);
                counting.WriteByte(20);
                counting.WriteByte(5);
                counting.WriteByte(0);

                LzmaStream lzmaStream = new LzmaStream(new LzmaEncoderProperties(!originalStream.CanSeek),
                                                       false, counting);
                counting.Write(lzmaStream.Properties, 0, lzmaStream.Properties.Length);
                return lzmaStream;
            }
            case ZipCompressionMethod.PPMd:
            {
                counting.Write(writer.PpmdProperties.Properties, 0, 2);
                return new PpmdStream(writer.PpmdProperties, counting, true);
            }
            default:
            {
                throw new NotSupportedException("CompressionMethod: " + zipCompressionMethod);
            }
        }
    }

    protected override void Dispose(bool disposing)
    {
        if (isDisposed)
        {
            return;
        }

        isDisposed = true;

        base.Dispose(disposing);
        if (disposing)
        {
            writeStream.Dispose();

            if (limitsExceeded)
            {
                // We have written invalid data into the archive,
                // so we destroy it now, instead of allowing the user to continue
                // with a defunct archive
                originalStream.Dispose();
                return;
            }

            entry.Crc = (uint)crc.Crc32Result;
            entry.Compressed = counting.Count;
            entry.Decompressed = decompressed;

            var zip64 = entry.Compressed >= uint.MaxValue || entry.Decompressed >= uint.MaxValue;
            var compressedvalue = zip64 ? uint.MaxValue : (uint)counting.Count;
            var decompressedvalue = zip64 ? uint.MaxValue : (uint)entry.Decompressed;

            if (originalStream.CanSeek)
            {
                originalStream.Position = (long)(entry.HeaderOffset + 6);
                originalStream.WriteByte(0);

                originalStream.Position = (long)(entry.HeaderOffset + 14);

                writer.WriteFooter(entry.Crc, compressedvalue, decompressedvalue);

                // Ideally, we should not throw from Dispose()
                // We should not get here as the Write call checks the limits
                if (zip64 && entry.Zip64HeaderOffset == 0)
                {
                    throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
                }

                // If we have pre-allocated space for zip64 data,
                // fill it out, even if it is not required
                if (entry.Zip64HeaderOffset != 0)
                {
                    originalStream.Position = (long)(entry.HeaderOffset + entry.Zip64HeaderOffset);
                    originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)0x0001), 0, 2);
                    originalStream.Write(DataConverter.LittleEndian.GetBytes((ushort)(8 + 8)), 0, 2);

                    originalStream.Write(DataConverter.LittleEndian.GetBytes(entry.Decompressed), 0, 8);
                    originalStream.Write(DataConverter.LittleEndian.GetBytes(entry.Compressed), 0, 8);
                }

                originalStream.Position = writer.streamPosition + (long)entry.Compressed;
                writer.streamPosition += (long)entry.Compressed;
            }
            else
            {
                // We have a streaming archive, so we should add a post-data-descriptor,
                // but we cannot as it does not hold the zip64 values
                // Throwing an exception until the zip specification is clarified

                // Ideally, we should not throw from Dispose()
                // We should not get here as the Write call checks the limits
                if (zip64)
                {
                    throw new NotSupportedException("Streams larger than 4GiB are not supported for non-seekable streams");
                }

                originalStream.Write(DataConverter.LittleEndian.GetBytes(ZipHeaderFactory.POST_DATA_DESCRIPTOR), 0, 4);
                writer.WriteFooter(entry.Crc, compressedvalue, decompressedvalue);
                writer.streamPosition += (long)entry.Compressed + 16;
            }

            writer.entries.Add(entry);
        }
    }

    public override void Flush()
    {
        writeStream.Flush();
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        throw new NotSupportedException();
    }

    public override long Seek(long offset, SeekOrigin origin)
    {
        throw new NotSupportedException();
    }

    public override void SetLength(long value)
    {
        throw new NotSupportedException();
    }

    public override void Write(byte[] buffer, int offset, int count)
    {
        // We check the limits first, because we can keep the archive consistent
        // if we can prevent the writes from happening
        if (entry.Zip64HeaderOffset == 0)
        {
            // Pre-check, the counting.Count is not exact, as we do not know the size before having actually compressed it
            if (limitsExceeded || ((decompressed + (uint)count) > uint.MaxValue) || (counting.Count + (uint)count) > uint.MaxValue)
            {
                throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
            }
        }

        decompressed += (uint)count;
        crc.SlurpBlock(buffer, offset, count);
        writeStream.Write(buffer, offset, count);

        if (entry.Zip64HeaderOffset == 0)
        {
            // Post-check, this is accurate
            if ((decompressed > uint.MaxValue) || counting.Count > uint.MaxValue)
            {
                // We have written the data, so the archive is now broken
                // Throwing the exception here, allows us to avoid
                // throwing an exception in Dispose() which is discouraged
                // as it can mask other errors
                limitsExceeded = true;
                throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
            }
        }
    }
}

#endregion Nested type: ZipWritingStream
}
}
\ No newline at end of file
diff --git a/BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterEntryOptions.cs b/BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterEntryOptions.cs
new file mode 100644
index 0000000000..5f1d8152d1
--- /dev/null
+++ 
b/BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterEntryOptions.cs
@@ -0,0 +1,27 @@
using System;
using SharpCompress.Common;
using SharpCompress.Compressors.Deflate;

namespace SharpCompress.Writers.Zip
{
    /// <summary>
    /// Per-entry options; any unset value falls back to the archive-wide default.
    /// </summary>
    public class ZipWriterEntryOptions
    {
        public CompressionType? CompressionType { get; set; }

        /// <summary>
        /// When CompressionType.Deflate is used, this property is referenced. Defaults to CompressionLevel.Default.
        /// </summary>
        public CompressionLevel? DeflateCompressionLevel { get; set; }

        public string EntryComment { get; set; }

        public DateTime? ModificationDateTime { get; set; }

        /// <summary>
        /// Allocate an extra 20 bytes for this entry to store
        /// 64 bit length values, thus enabling streams
        /// larger than 4GiB.
        /// This option is not supported with non-seekable streams.
        /// </summary>
        public bool? EnableZip64 { get; set; }
    }
}
\ No newline at end of file
diff --git a/BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterOptions.cs b/BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterOptions.cs
new file mode 100644
index 0000000000..660e567a73
--- /dev/null
+++ b/BizHawk.Client.Common/SharpCompress/Writers/Zip/ZipWriterOptions.cs
@@ -0,0 +1,44 @@
using SharpCompress.Archives;
using SharpCompress.Common;
using SharpCompress.Compressors.Deflate;

namespace SharpCompress.Writers.Zip
{
    /// <summary>
    /// Archive-wide options for <c>ZipWriter</c>.
    /// </summary>
    public class ZipWriterOptions : WriterOptions
    {
        public ZipWriterOptions(CompressionType compressionType)
            : base(compressionType)
        {
        }

        // Copy-constructor used internally to upgrade plain WriterOptions;
        // zip-specific settings are carried over only when the source really
        // is a ZipWriterOptions instance.
        internal ZipWriterOptions(WriterOptions options)
            : base(options.CompressionType)
        {
            LeaveStreamOpen = options.LeaveStreamOpen;
            ArchiveEncoding = options.ArchiveEncoding;

            if (options is ZipWriterOptions writerOptions)
            {
                UseZip64 = writerOptions.UseZip64;
                DeflateCompressionLevel = writerOptions.DeflateCompressionLevel;
                ArchiveComment = writerOptions.ArchiveComment;
            }
        }

        /// <summary>
        /// When CompressionType.Deflate is used, this property is referenced. Defaults to CompressionLevel.Default.
        /// </summary>
        public CompressionLevel DeflateCompressionLevel { get; set; } = CompressionLevel.Default;

        public string ArchiveComment { get; set; }

        /// <summary>
        /// Sets a value indicating if zip64 support is enabled.
        /// If this is not set, individual stream lengths cannot exceed 4 GiB.
        /// This option is not supported for non-seekable streams.
        /// Archives larger than 4GiB are supported as long as all streams
        /// are less than 4GiB in length.
        /// </summary>
        public bool UseZip64 { get; set; }
    }
}
\ No newline at end of file
diff --git a/BizHawk.Client.Common/SharpCompressArchiveHandler.cs b/BizHawk.Client.Common/SharpCompressArchiveHandler.cs
new file mode 100644
index 0000000000..d812e5657e
--- /dev/null
+++ b/BizHawk.Client.Common/SharpCompressArchiveHandler.cs
@@ -0,0 +1,100 @@
using System.Collections.Generic;
using System.IO;
using System.Linq;
using BizHawk.Common;

namespace BizHawk.Client.Common
{
    /// <summary>
    /// Implementation of IHawkFileArchiveHandler using SharpCompress library
    /// Pure c# implementation for Mono (although this will work on Windows as well - but probably not as performant as SevenZipSharp)
    /// </summary>
    public class SharpCompressArchiveHandler : IHawkFileArchiveHandler
    {
        private SharpCompress.Archives.IArchive _archive;

        public void Dispose()
        {
            if (_archive != null)
            {
                _archive.Dispose();
                _archive = null;
            }
        }

        /// <summary>
        /// Probes <paramref name="fileName"/> by attempting to open it as an archive.
        /// Offset/executable detection is not provided by SharpCompress, so both
        /// out-values are always their defaults.
        /// </summary>
        public bool CheckSignature(string fileName, out int offset, out bool isExecutable)
        {
            offset = 0;
            isExecutable = false;

            try
            {
                // 'using' ensures the probe archive is disposed even if Type throws
                // (the original leaked it on that path).
                using (var arcTest = SharpCompress.Archives.ArchiveFactory.Open(fileName))
                {
                    var aType = arcTest.Type; // force format detection
                }
                return true;
            }
            catch
            {
                // deliberate best-effort: any failure just means "not an archive we can open"
            }

            return false;
        }

        public IHawkFileArchiveHandler Construct(string path)
        {
            var ret = new SharpCompressArchiveHandler();
            ret.Open(path);
            return ret;
        }

        private void Open(string path)
        {
            _archive = SharpCompress.Archives.ArchiveFactory.Open(path);
        }

        /// <summary>
        /// Lists all non-directory entries; ArchiveIndex counts files in archive
        /// order and Index is the position within the returned list.
        /// </summary>
        public List<HawkFileArchiveItem> Scan()
        {
            var ret = new List<HawkFileArchiveItem>();

            int idx = 0;
            foreach (var i in _archive.Entries)
            {
                if (i.IsDirectory)
                {
                    continue;
                }

                var ai = new HawkFileArchiveItem
                {
                    Name = HawkFile.Util_FixArchiveFilename(i.Key),
                    Size = (long)i.Size,
                    ArchiveIndex = idx++,
                    Index = ret.Count
                };

                ret.Add(ai);
            }

            return ret;
        }

        /// <summary>
        /// Copies the file at <paramref name="index"/> (as produced by Scan,
        /// i.e. counting non-directory entries only) into <paramref name="stream"/>.
        /// </summary>
        public void ExtractFile(int index, Stream stream)
        {
            int idx = 0;

            foreach (var i in _archive.Entries)
            {
                if (i.IsDirectory)
                {
                    continue;
                }

                if (idx++ == index)
                {
                    using (var entryStream = i.OpenEntryStream())
                    {
                        entryStream.CopyTo(stream);
                        break;
                    }
                }
            }
        }
    }
}
\ No newline at end of file
diff --git a/BizHawk.Client.EmuHawk/Program.cs b/BizHawk.Client.EmuHawk/Program.cs
index 1000510b0d..e8c2056634 100644
--- a/BizHawk.Client.EmuHawk/Program.cs
+++ b/BizHawk.Client.EmuHawk/Program.cs
@@ -112,7 +112,10 @@ namespace BizHawk.Client.EmuHawk
 			BizHawk.Common.TempFileManager.Start();
-			HawkFile.ArchiveHandlerFactory = new SevenZipSharpArchiveHandler();
+			if (PlatformLinkedLibSingleton.RunningOnUnix)
+				HawkFile.ArchiveHandlerFactory = new SharpCompressArchiveHandler();
+			else
+				HawkFile.ArchiveHandlerFactory = new SevenZipSharpArchiveHandler();
 			ArgParser argParser = new ArgParser();
 			argParser.ParseArguments(args);